code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
from setuptools import setup
# Reads __version__ by importing the package at build time; requires the
# package (and its import-time dependencies) to be importable during setup.
from gun import __version__

setup(
    name = 'gun',
    version = __version__,
    description = 'Gentoo Updates Notifier',
    author = 'Andriy Yurchuk',
    author_email = 'ayurchuk@minuteware.net',
    url = 'https://github.com/Ch00k/gun',
    # NOTE(review): `license` conventionally names the license (e.g. 'MIT'),
    # not a file path; the license text itself belongs in MANIFEST/license_files.
    license = 'LICENSE.txt',
    long_description = open('README.rst').read(),
    # Installs a `gun` console script that dispatches to gun.sync:main.
    entry_points = {
        'console_scripts': [
            'gun = gun.sync:main'
        ]
    },
    packages = ['gun'],
    # Ships the default config to a system-wide path (requires root to install).
    data_files = [('/etc/gun/', ['data/gun.conf'])],
    install_requires = ['xmpppy >= 0.5.0-rc1']
) | Ch00k/gun | setup.py | Python | mit | 728 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
import os
from setuptools import setup, find_packages
def get_version():
    """Return the VERSION string defined in ``instapush/version.py``.

    The version file is executed in a scratch namespace so that setup.py
    does not have to import the package (and its dependencies).

    Returns:
        str: the value bound to ``VERSION`` in the version file.

    Raises:
        RuntimeError: if the version file defines no ``VERSION`` name.
    """
    basedir = os.path.dirname(__file__)
    # Use a plain dict as the exec namespace; the original shadowed the
    # builtin `locals`, which is confusing and lint-hostile.
    namespace = {}
    with open(os.path.join(basedir, 'instapush/version.py')) as f:
        exec(f.read(), namespace)
    try:
        return namespace['VERSION']
    except KeyError:
        # The original `raise` sat *after* the return and was unreachable
        # dead code; make the missing-version guard actually fire.
        raise RuntimeError('No version info found.')
# Package metadata; the version is read from instapush/version.py at build
# time via get_version() instead of importing the package.
setup(
    name='instapush',
    version = get_version(),
    keywords = ('instapush', 'tools'),
    description = 'a python wrapper for instapush',
    license = 'MIT License',
    url = 'https://github.com/adamwen829/instapush-py',
    author = 'Adam Wen',
    author_email = 'adamwen829@gmail.com',
    # Auto-discover all packages under the repo root.
    packages = find_packages(),
    include_package_data = True,
    platforms = 'any',
    install_requires = ['requests']
)
| adamwen829/instapush-py | setup.py | Python | mit | 894 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 17:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Volunteer.picture`` to ``Volunteer.avatar``.

    Auto-generated by Django; a pure column rename, no data is copied.
    """

    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('volunteers', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='volunteer',
            old_name='picture',
            new_name='avatar',
        ),
    ]
| NewsNerdsAtCoJMC/ProjectTicoTeam6 | service/volunteers/migrations/0002_auto_20170314_1712.py | Python | mit | 424 |
import command
# Keywords the assistant recognises; checked in list order against the input,
# so earlier entries win when several keywords appear in one utterance.
command_list = ["joke","weather","play","pause","stop","skip","light","security","created","name","mood","selfie"]


def interpret(s):
    """Execute the first known command keyword found in *s*.

    Returns the result of ``command.Command(cmd).do()`` for the first
    matching keyword, or ``None`` when no keyword occurs in the input.
    (Python 2 source — note the statement-form prints.)
    """
    print "meow"  # debug trace left in by the author
    for cmd in command_list:
        # str.find returns -1 when absent; any other index is a match
        # anywhere in the string (substring match, not word match).
        if s.find(cmd) != -1:
            print cmd
            return command.Command(cmd).do()
| TrevorEdwards/Jauffre | jauffre/interpreter.py | Python | mit | 284 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from wtforms.fields import StringField
from wtforms.validators import DataRequired
from wtforms_sqlalchemy.fields import QuerySelectField
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.tracks.models.groups import TrackGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import IndicoMarkdownField
class TrackForm(IndicoForm):
    """Form to create/edit a conference track.

    Field declaration order defines the rendered order, so it must not
    be rearranged.
    """

    title = StringField(_('Title'), [DataRequired()])
    code = StringField(_('Code'))
    track_group = QuerySelectField(_('Track group'), default='', allow_blank=True, get_label='title',
                                   description=_('Select a track group to which this track should belong'))
    default_session = QuerySelectField(_('Default session'), default='', allow_blank=True, get_label='title',
                                       description=_('Indico will preselect this session whenever an abstract is '
                                                     'accepted for the track'))
    description = IndicoMarkdownField(_('Description'), editor=True)

    def __init__(self, *args, **kwargs):
        # `event` is consumed here so WTForms never sees the extra kwarg.
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Restrict both QuerySelectFields to objects belonging to this event.
        self.default_session.query = Session.query.with_parent(event)
        self.track_group.query = TrackGroup.query.with_parent(event)
class ProgramForm(IndicoForm):
    """Form to edit the call-for-abstracts programme text."""

    program = IndicoMarkdownField(_('Program'), editor=True, mathjax=True)

    @generated_data
    def program_render_mode(self):
        # Persisted alongside the text so renderers know it is Markdown.
        return RenderMode.markdown
class TrackGroupForm(IndicoForm):
    """Form to create/edit a track group."""

    title = StringField(_('Title'), [DataRequired()])
    description = IndicoMarkdownField(_('Description'), editor=True)
| indico/indico | indico/modules/events/tracks/forms.py | Python | mit | 2,016 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pysia documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pysia
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySia'
copyright = u"2017, Jeffrey McLarty"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pysia.__version__
# The full version, including alpha/beta/rc tags.
release = pysia.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysiadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pysia.tex',
u'PySia Documentation',
u'Jeffrey McLarty', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysia',
u'PySia Documentation',
[u'Jeffrey McLarty'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysia',
u'PySia Documentation',
u'Jeffrey McLarty',
'pysia',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| jnmclarty/pysia | docs/conf.py | Python | mit | 8,373 |
"""
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
DJ_PROJECT_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(DJ_PROJECT_DIR)
WSGI_DIR = os.path.dirname(BASE_DIR)
REPO_DIR = os.path.dirname(WSGI_DIR)
DATA_DIR = os.environ.get('OPENSHIFT_DATA_DIR', BASE_DIR)
import sys
sys.path.append(os.path.join(REPO_DIR, 'libs'))
import secrets
SECRETS = secrets.getter(os.path.join(DATA_DIR, 'secrets.json'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = SECRETS['secret_key']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG') == 'True'
from socket import gethostname
ALLOWED_HOSTS = [
gethostname(), # For internal OpenShift load balancer security purposes.
os.environ.get('OPENSHIFT_APP_DNS'), # Dynamically map to the OpenShift gear name.
#'example.com', # First DNS alias (set up in the app)
#'www.example.com', # Second DNS alias (set up in the app)
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'devices',
'devices.bpm'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# GETTING-STARTED: change 'myproject' to your project name:
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'api', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': os.environ.get('OPENSHIFT_POSTGRESQL_DB_USERNAME'),
'PASSWORD': os.environ.get('OPENSHIFT_POSTGRESQL_DB_PASSWORD'),
'HOST': os.environ.get('OPENSHIFT_POSTGRESQL_DB_HOST'), # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': os.environ.get('OPENSHIFT_POSTGRESQL_DB_PORT'), # Set to empty string for default.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(WSGI_DIR, 'static')
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CORS_ORIGIN_ALLOW_ALL = True
FIREBASE_URL = 'https://ubervest.firebaseio.com' | easyCZ/SLIP-A-2015 | api/wsgi/src/project/settings.py | Python | mit | 4,305 |
from distutils.core import setup
# reST long description shown on PyPI.
# BUG FIX: the prettytable bullet previously linked to the python-progressbar
# project page (copy-paste error); it now points at the prettytable project.
long_description = """
`termtool` helps you write subcommand-based command line tools in Python. It collects several Python libraries into a declarative syntax:
* `argparse`, the argument parsing module with subcommand support provided in the standard library in Python 2.7 and later.
* `prettytable <http://code.google.com/p/prettytable/>`_, an easy module for building tables of information.
* `progressbar <http://code.google.com/p/python-progressbar/>`_, a handy module for displaying progress bars.
* `logging`, the simple built-in module for logging messages.
"""
# Distributed as a single module (py_modules), not a package.
setup(
    name='termtool',
    version='1.1',
    description='Declarative terminal tool programming',
    author='Mark Paschal',
    author_email='markpasc@markpasc.org',
    url='https://github.com/markpasc/termtool',
    long_description=long_description,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    packages=[],
    py_modules=['termtool'],
    # distutils-style metadata; not enforced at install time like
    # setuptools' install_requires.
    requires=['argparse', 'PrettyTable', 'progressbar'],
)
| markpasc/termtool | setup.py | Python | mit | 1,440 |
from django.conf.urls import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Django <=1.9-style URLconf: patterns() with dotted-string view paths.
# NOTE(review): patterns() and string views were removed in Django 1.10;
# this file predates that.
urlpatterns = patterns('',
    url(r'^$', 'schwag.views.home', name='home'),
    url(r'^about/$', 'schwag.views.about', name='about'),
    url(r'^location/$', 'schwag.views.location', name='location'),
    url(r'^contact/$', 'schwag.views.contact', name='contact'),
    url(r'^bmx/$', 'schwag.views.bmx', name='bmx'),
    url(r'^account/login/$', 'schwag.views.login', name='login'),
    url(r'^account/logout/$', 'schwag.views.logout', name='logout'),
    url(r'^account/register/$', 'schwag.views.register', name='register'),
    url(r'^account/', include('django.contrib.auth.urls')),
    url(r'^checkout/', include('senex_shop.checkout.urls')),
    url(r'^cart/', include('senex_shop.cart.urls')),
    url(r'^shop/', include('senex_shop.urls')),
    url(r'^news/', include('senex_shop.news.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Mount django-debug-toolbar only in DEBUG and only if it is installed.
if settings.DEBUG:
    try:
        import debug_toolbar
        urlpatterns += patterns('',
            url(r'^__debug__/', include(debug_toolbar.urls)),
        )
    except ImportError:
        pass
# Serve uploaded media through Django itself — development only.
if settings.DEBUG:
    urlpatterns += patterns('django.views.static',
        (r'media/(?P<path>.*)', 'serve', {'document_root': settings.MEDIA_ROOT}),
) | endthestart/schwag | schwag/schwag/urls.py | Python | mit | 1,849 |
#!/usr/bin/env python
from tasks import *
from math import cos, sin, pi
import rospy
from signal import signal, SIGINT
from geometry_msgs.msg import Point
import sys
def signal_handler(signal, frame):
    """SIGINT handler: announce shutdown, close the controller, exit.

    NOTE(review): the parameter `signal` shadows the imported `signal`
    module, and `Controller.__exit__()` is called directly without the
    usual (exc_type, exc_value, traceback) arguments — presumably
    taskController declares it parameterless; confirm against tasks.py.
    """
    print('You pressed Ctrl+C')
    print('Leaving the Controller & closing the UAV')
    Controller.__exit__()
    sys.exit(0)
def PointsInCircum(r, z=3, n=8):
    """Return (x, y, z) waypoints evenly spaced around a circle.

    Args:
        r: circle radius in metres.
        z: constant altitude in metres (default 3).
        n: number of segments (default 8); n + 1 points are returned,
           the last one closing the loop at the start position.

    Coordinates are rounded to 3 decimal places.
    """
    step = 2 * pi / n
    waypoints = []
    for k in range(n + 1):
        angle = step * k
        waypoints.append((round(cos(angle) * r, 3),
                          round(sin(angle) * r, 3),
                          z))
    return waypoints
# --- Script body: build and run a circular flight scenario -----------------
rospy.init_node('test_tasks')
Controller = taskController(rate=3, setpoint_rate=10)
rospy.loginfo("Controller initiated")
# Install the Ctrl+C handler so the UAV is shut down cleanly.
signal(SIGINT, signal_handler)
#Initialisation
tasks = []
rospy.loginfo("Circle sequencing")
tasks.append(init_UAV("Init UAV"))
tasks.append(arm("Arming"))
tasks.append(takeoff("TAKEOFF"))
#Targetting circle points: radius 3 m, default altitude 3 m, 9 waypoints.
CirclePoints = PointsInCircum(3)
for circle in CirclePoints:
    tasks.append(target("target", Point(circle[0], circle[1], circle[2])))
tasks.append(land("LANDING"))
#Disarming
tasks.append(disarm("Disarming"))
#Adding tasks
Controller.addTasks(tasks)
rospy.loginfo("Tasks added")
# for i in range(100):
# Spin forever; only the SIGINT handler terminates the process.
while True:
    Controller.rate.sleep()
    Controller.spinOnce()
    rospy.loginfo("Task %s on %s : %s", Controller.current+1, Controller.count, Controller.getCurrentTask().__str__())
# NOTE(review): unreachable — the loop above never breaks; cleanup happens
# in signal_handler instead.
Controller.__exit__()
sys.exit(0)
| AlexisTM/Indoor_Position_lasers | laserpack/bin/Scenari/scenario_circle.py | Python | mit | 1,452 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ComplianceStatus
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorResponse
from ._models_py3 import HelmOperatorProperties
from ._models_py3 import ProxyResource
from ._models_py3 import Resource
from ._models_py3 import ResourceProviderOperation
from ._models_py3 import ResourceProviderOperationDisplay
from ._models_py3 import ResourceProviderOperationList
from ._models_py3 import Result
from ._models_py3 import SourceControlConfiguration
from ._models_py3 import SourceControlConfigurationList
from ._models_py3 import SystemData
except (SyntaxError, ImportError):
from ._models import ComplianceStatus # type: ignore
from ._models import ErrorDefinition # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import HelmOperatorProperties # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceProviderOperation # type: ignore
from ._models import ResourceProviderOperationDisplay # type: ignore
from ._models import ResourceProviderOperationList # type: ignore
from ._models import Result # type: ignore
from ._models import SourceControlConfiguration # type: ignore
from ._models import SourceControlConfigurationList # type: ignore
from ._models import SystemData # type: ignore
from ._source_control_configuration_client_enums import (
ComplianceStateType,
CreatedByType,
Enum0,
Enum1,
MessageLevelType,
OperatorScopeType,
OperatorType,
ProvisioningStateType,
)
__all__ = [
'ComplianceStatus',
'ErrorDefinition',
'ErrorResponse',
'HelmOperatorProperties',
'ProxyResource',
'Resource',
'ResourceProviderOperation',
'ResourceProviderOperationDisplay',
'ResourceProviderOperationList',
'Result',
'SourceControlConfiguration',
'SourceControlConfigurationList',
'SystemData',
'ComplianceStateType',
'CreatedByType',
'Enum0',
'Enum1',
'MessageLevelType',
'OperatorScopeType',
'OperatorType',
'ProvisioningStateType',
]
| Azure/azure-sdk-for-python | sdk/kubernetesconfiguration/azure-mgmt-kubernetesconfiguration/azure/mgmt/kubernetesconfiguration/v2021_03_01/models/__init__.py | Python | mit | 2,687 |
from __future__ import (
unicode_literals,
absolute_import,
division,
print_function,
)
# Make Py2's str type like Py3's
str = type('')
# Rules that take into account part of speech to alter text
structure_rules = [
((["JJ*","NN*"],),
(["chuffing",0,1],),
0.1),
((["."],),
(["our","kid",0],["init",0],["and","that",0],["and","stuff",0]),
0.1),
((["NN"],),
(["thing"],),
0.05),
((["START"],),
([0,"here","yar","."],),
0.05),
]
# Words to be ignored by the translator
ignores = [ "i","a","be","will" ]
# Direct word substitutions
word_rules = [
(("and",),
("n'",)),
(("of",),
("ov",)),
(("her",),
("'er",)),
(("my",),
("me",)),
(("what",),
("wot",)),
(("our",),
("ah",)),
(("acceptable","ace","awesome","brilliant","excellent","fantastic","good",
"great","likable","lovely","super","smashing","nice","pleasing",
"rad","superior","worthy","admirable","agreeable","commendable",
"congenial","deluxe","honorable","honourable","neat","precious",
"reputable","splendid","stupendous","exceptional","favorable",
"favourable","marvelous","satisfactory","satisfying","valuable",
"wonderful","fine","perfect","special","exciting","amazing","succeeded",
"worked","successful"),
("buzzin'","top","mint","boss","sound","fit","sweet","madferit","safe","raz",
"bob on","bangin'","peach","bazzin'","kewl","quality")),
(("anything",),
("owt",)),
(("nothing","none","zero","blank","null","void","nought",),
("nowt",)),
(("disappointed","unhappy","sad","melancholy",),
("gutted",)),
(("break","damage","smash","crack","destroy","annihilate","obliterate",
"corrupt","ruin","spoil","wreck","trash","fail",),
("knacker","bugger",)),
(("bad","poor","rubbish","broken","errored","damaged","atrocious","awful",
"cheap","crummy","dreadful","lousy","rough","unacceptable",
"garbage","inferior","abominable","amiss","beastly","careless",
"cheesy","crap","crappy","cruddy","defective","deficient",
"erroneous","faulty","incorrect","inadequate","substandard",
"unsatisfactory","dysfunctional","malfunctioning","corrupt","failed",),
("naff","shit","knackered","buggered","pants","pear-shaped","tits up",
"ragged","devilled","out of order","bang out of order","biz","kippered",
"bobbins")),
(("error","mistake","problem",),
("cock up","balls up")),
(("very","exceedingly","mostly","sheer","exceptionally","genuinely",
"especially","really"),
("well","bare","pure","dead","proper",)),
(("numerous","many","all","most",),
("bare","pure",)),
(("mad","crazy","insane","crazed","kooky","nuts","nutty","silly","wacky",
"beserk","cuckoo","potty","batty","bonkers","unhinged","mental",
"idiotic","stupid","moronic","dumb","foolish",),
("barmy",)),
(("delighted","pleased","happy","cheerful","contented","ecstatic","elated",
"glad","joyful","joyous","jubilant","lively","merry","overjoyed",
"peaceful","pleasant","pleased","thrilled","upbeat","blessed",
"blest","blissful","captivated","gleeful","gratified","jolly",
"mirthful","playful","proud",),
("chuffed","buzzin'")),
(("things","stuff","elements","parts","pieces","facts","subjects","situations",
"concepts","concerns","items","materials","objects","files",),
("shit",)),
(("attractive","alluring","beautiful","charming","engaging","enticing",
"glamorous","gorgeous","handsome","inviting","tempting","adorable",
"agreeable","enchanting","enthralling","hunky","pretty","seductive",
"provocative","tantalizing","teasing","stunning",),
("fit",)),
(("any",),
("whatever",)),
(("unattractive","ugly","horrible","nasty","unpleasant","hideous","gross",
"unsightly","horrid","unseemly","grisly","awful","foul","repelling",
"repulsive","repugnant","revolting","uninviting","monstrous",),
("mingin'","rancid","'angin","rank","manky")),
(("fast","quick","swift","brief",),
("rapid",)),
(("pound",),
("quid","squid",)),
(("man",),
("bloke", "fella",)),
(("men",),
("blokes", "fellas",)),
(("mate", "friend"),
("pal","mate",)),
(("hello","greetings","welcome","hi","howdy",),
("arrite","how do","hiya",)),
(("bye","goodbye","farewell",),
("ta-ra",)),
(("kiss",),
("snog",)),
(("sandwich",),
("butty","barm")),
(("sandwiches",),
("butties","barms")),
(("eat","consume","absorb","digest","food","sustinance",),
("scran",)),
(("lunch",),
("dinner",)),
(("dinner",),
("tea",)),
(("you",),
("youse",)),
(("idiot","moron","fool","buffoon","clown","jerk","nerd","nitwit","stooge",
"sucker","twit","clod","cretin","dolt","dope","dunce","oaf","twerp",
"imbecile","ignoramus","loon","ninny","numskull",),
("scrote","muppet","knobber","spanner","gonk","cabbage")),
(("police","law","cop","cops","policeman","policewoman","constable","officer",
"detective","bobby","copper",),
("dibble",)),
(("house","dwelling","appartment","building","home","mansion","residence",
"shack","abode","castle","cave","coop","flat","habitation","pad",
"residency","place",),
("gaff",)),
(("was",),
("were",)),
(("were",),
("was",)),
(("yes","ok",),
("aye",)),
(("are",),
("iz",)),
(("no",),
("nah",)),
(("haven't",),
("ain't",)),
(("right",),
("reet",)),
(("the",),
("t'",)),
(("?",),
("eh?","or wot?","yeah?")),
]
# Alterations to the sound of a word based on its consonant and vowel sounds
phoneme_rules = [
((["START","HH"],),
["START","'"]),
((["ER","END"],),
["AA","'","END"]),
((["T","END"],),
["'","END"],),
((["AE","R"],),
["AE"]),
((["AA","R"],),
["AE","R"]),
((["AH1"],),
["UW"],),
((["AO","R","END"],["UH","R","END"],),
["AH","R"]),
((["AO"],),
["AA"],),
((["NG","END"],),
["N","'","END"]),
((["T","UW","END"],),
["T","AH","END"]),
((["START","DH"],),
["START","D"]),
((["TH","END"],),
["F","END"],),
((["DH","END"],),
["V","END"],),
((["START","TH"],),
["START","F"]),
((["VOWEL","T","VOWEL"],),
[0,"R",2]),
]
if __name__ == "__main__":
    # Ad-hoc CLI: translate the first argument using the word-substitution
    # table.  (structure_rules and phoneme_rules need POS/phoneme data and
    # are not applied here.)
    import re
    import random
    import sys

    text = sys.argv[1]
    # BUG FIX: this loop previously iterated over the undefined name
    # `words` (NameError); the substitution table defined in this module
    # is `word_rules`.
    for patts, repls in word_rules:
        for patt in patts:
            # Whole-word match; a random synonym is chosen per occurrence.
            text = re.sub(r'\b' + patt + r'\b', lambda m: random.choice(repls), text)
    print(text)
| safehammad/mancify | mancify/dialects/manc.py | Python | mit | 7,080 |
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import threading
from wpwithin.WPWithinCallback import Client
from wpwithin.WPWithinCallback import Processor
class CallbackHandler:
    """Default handler for service-delivery events from the WPWithin core.

    Each callback just prints the event payload; pass a custom handler to
    EventServer for real behaviour.  (Python 2 source — statement prints.)
    """

    def __init__(self):
        # Placeholder for event bookkeeping; never written by this class.
        self.log = {}

    def beginServiceDelivery(self, serviceId, serviceDeliveryToken, unitsToSupply):
        """Invoked when the core starts delivering a service."""
        try:
            print "event from core - onBeginServiceDelivery()"
            print "ServiceID: {0}\n".format(serviceId)
            print "UnitsToSupply: {0}\n".format(unitsToSupply)
            print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
            print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
            print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
            print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
            print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
        except Exception as e:
            print "doBeginServiceDelivery failed: " + str(e)

    def endServiceDelivery(self, serviceId, serviceDeliveryToken, unitsReceived):
        """Invoked when the core finishes delivering a service."""
        try:
            print "event from core - onEndServiceDelivery()"
            print "ServiceID: {0}\n".format(serviceId)
            print "UnitsReceived: {0}\n".format(unitsReceived)
            print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
            print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
            print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
            print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
            print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
        except Exception as e:
            print "doEndServiceDelivery failed: " + str(e)
class EventServer:
server = None
def startServer(self, server):
print "##### STARTING WRAPPER SERVER to receive callbacks #####"
print "##### SERVER: " + str(server)
server.serve()
def stop():
if server != None:
server.setShouldStop(True)
def __init__(self, listenerHandler, hostname, port):
try:
if(listenerHandler == None):
print "Using build-in handler"
theListenerToUse = CallbackHandler()
else:
print "Using custom handler"
theListenerToUse = listenerHandler
processor = Processor(theListenerToUse)
transport = TSocket.TServerSocket(host=hostname, port=port)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#self.server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
self.server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
print "Serving the Wrapper listener, port: " + str(port)
thread = threading.Thread(target=self.startServer, args=([self.server]))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
print "##### SERVER: " + str(self.server)
print "##### SERVER: SHOULD HAVE STARTED"
print "Should have started Wrapper listener"
except Exception as e:
print "Event server setup failed: " + str(e)
| WPTechInnovation/worldpay-within-sdk | wrappers/python_2-7/EventServer.py | Python | mit | 3,460 |
"""Base command for search-related management commands."""
from __future__ import annotations
import argparse
import builtins
import logging
from typing import Any, Optional, Union
from django.core.management.base import BaseCommand
from elasticsearch.exceptions import TransportError
# Payload returned by do_index_command for logging: a list/dict, or None.
CommandReturnType = Optional[Union[list, dict]]
# Module-level logger, named after this package for filtering.
logger = logging.getLogger(__name__)
class BaseSearchCommand(BaseCommand):
    """Base class for commands that interact with the search index."""

    description = "Base search command."

    def _confirm_action(self) -> bool:
        """Return True if the user confirms the action."""
        msg = "Are you sure you wish to continue? [y/N] "
        return builtins.input(msg).lower().startswith("y")

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Add default base options of --noinput and indexes."""
        parser.add_argument(
            "-f",
            "--noinput",
            action="store_false",
            dest="interactive",
            default=True,
            help="Do no display user prompts - may affect data.",
        )
        parser.add_argument(
            "indexes", nargs="*", help="Names of indexes on which to run the command."
        )

    def do_index_command(self, index: str, **options: Any) -> CommandReturnType:
        """Run a command against a named index; subclasses must override."""
        raise NotImplementedError()

    def handle(self, *args: Any, **options: Any) -> None:
        """Run do_index_command on each specified index and log the output.

        Each index is processed independently: known failure modes are
        converted into a result dict so one bad index does not abort the
        rest of the run.
        """
        for index in options.pop("indexes"):
            try:
                data = self.do_index_command(index, **options)
            except TransportError as ex:
                logger.warning("ElasticSearch threw an error: %s", ex)
                data = {"index": index, "status": ex.status_code, "reason": ex.error}
            except FileNotFoundError as ex:
                logger.warning("Mapping file not found: %s", ex)
                data = {
                    "index": index,
                    "status": "N/A",
                    "reason": "Mapping file not found",
                }
            except Exception as ex:  # noqa: B902
                logger.warning("Error running command: %s", ex)
                data = {
                    "index": index,
                    "status": "N/A",
                    "reason": str(ex),
                }
            # Bug fix: this log call used to sit in a ``finally`` block,
            # where a BaseException (e.g. KeyboardInterrupt) raised before
            # ``data`` was bound caused a NameError that masked the original
            # exception. Logging after the handlers is equivalent for every
            # handled path.
            logger.info(data)
| yunojuno/elasticsearch-django | elasticsearch_django/management/commands/__init__.py | Python | mit | 2,479 |
"""Tests for GP and SP classes"""
import math
import unittest
import numpy as np
from gpkit import (Model, Monomial, settings, VectorVariable, Variable,
SignomialsEnabled, ArrayVariable)
from gpkit.geometric_program import GeometricProgram
from gpkit.small_classes import CootMatrix
from gpkit.feasibility import feasibility_model
NDIGS = {"cvxopt": 5, "mosek": 7, "mosek_cli": 5}
# name: decimal places of accuracy expected from each installed solver;
# assigned to each generated TestCase subclass as ``ndig`` at file bottom.
class TestGP(unittest.TestCase):
    """
    Test GeometricPrograms.
    This TestCase gets run once for each installed solver.
    """
    name = "TestGP_"
    # solver and ndig get set in loop at bottom this file, a bit hacky
    solver = None
    ndig = None
    def test_trivial_gp(self):
        """
        Create and solve a trivial GP:
            minimize x + 2y
            subject to xy >= 1
        The global optimum is (x, y) = (sqrt(2), 1/sqrt(2)).
        """
        x = Monomial('x')
        y = Monomial('y')
        prob = Model(cost=(x + 2*y),
                     constraints=[x*y >= 1])
        sol = prob.solve(solver=self.solver, verbosity=0)
        self.assertEqual(type(prob.latex()), str)
        self.assertEqual(type(prob._repr_latex_()), str)
        self.assertAlmostEqual(sol("x"), math.sqrt(2.), self.ndig)
        self.assertAlmostEqual(sol("y"), 1/math.sqrt(2.), self.ndig)
        self.assertAlmostEqual(sol("x") + 2*sol("y"),
                               2*math.sqrt(2),
                               self.ndig)
        self.assertAlmostEqual(sol["cost"], 2*math.sqrt(2), self.ndig)
    def test_simple_united_gp(self):
        """Solve a small GP whose variables carry (pint) units."""
        R = Variable('R', units="nautical_miles")
        a0 = Variable('a0', 340.29, 'm/s')
        theta = Variable(r'\theta', 0.7598)
        t = Variable('t', 10, 'hr')
        T_loiter = Variable('T_{loiter}', 1, 'hr')
        T_reserve = Variable('T_{reserve}', 45, 'min')
        M = VectorVariable(2, 'M')
        # only meaningful when units support is available
        if R.units:
            prob = Model(1/R,
                         [t >= sum(R/a0/M/theta**0.5) + T_loiter + T_reserve,
                          M <= 0.76])
            sol = prob.solve(verbosity=0)
            self.assertAlmostEqual(sol["cost"], 0.0005532, self.ndig)
    def test_trivial_vector_gp(self):
        """
        Create and solve a trivial GP with VectorVariables
        """
        x = VectorVariable(2, 'x')
        y = VectorVariable(2, 'y')
        prob = Model(cost=(sum(x) + 2*sum(y)),
                     constraints=[x*y >= 1])
        sol = prob.solve(solver=self.solver, verbosity=0)
        self.assertEqual(sol('x').shape, (2,))
        self.assertEqual(sol('y').shape, (2,))
        for x, y in zip(sol('x'), sol('y')):
            self.assertAlmostEqual(x, math.sqrt(2.), self.ndig)
            self.assertAlmostEqual(y, 1/math.sqrt(2.), self.ndig)
        self.assertAlmostEqual(sol["cost"]/(4*math.sqrt(2)), 1., self.ndig)
    def test_zero_lower_unbounded(self):
        # a chain of >= constraints bottoming out at a zero-valued variable
        # should still solve
        x = Variable('x', value=4)
        y = Variable('y', value=0)
        z = Variable('z')
        t1 = Variable('t1')
        t2 = Variable('t2')
        prob = Model(z, [z >= x + t1,
                         t1 >= t2,
                         t2 >= y])
        sol = prob.solve(verbosity=0)
    def test_mdd_example(self):
        # checks the A-matrix structure of three closely-related models
        Cl = Variable("Cl", 0.5, "-", "Lift Coefficient")
        Mdd = Variable("Mdd", "-", "Drag Divergence Mach Number")
        m1 = Model(1/Mdd, [1 >= 5*Mdd + 0.5, Mdd >= 0.00001])
        m2 = Model(1/Mdd, [1 >= 5*Mdd + 0.5])
        m3 = Model(1/Mdd, [1 >= 5*Mdd + Cl, Mdd >= 0.00001])
        sol1 = m1.solve(solver=self.solver, verbosity=0)
        sol2 = m2.solve(solver=self.solver, verbosity=0)
        sol3 = m3.solve(solver=self.solver, verbosity=0)
        gp1, gp2, gp3 = [m.program for m in [m1, m2, m3]]
        self.assertEqual(gp1.A, CootMatrix(row=[0, 1, 2],
                                           col=[0, 0, 0],
                                           data=[-1, 1, -1]))
        self.assertEqual(gp2.A, CootMatrix(row=[0, 1],
                                           col=[0, 0],
                                           data=[-1, 1]))
        # order of variables within a posynomial is not stable
        # (though monomial order is)
        equiv1 = gp3.A == CootMatrix(row=[0, 2, 3, 2],
                                     col=[0, 0, 0, 0],
                                     data=[-1, 1, -1, 0])
        equiv2 = gp3.A == CootMatrix(row=[0, 1, 3, 2],
                                     col=[0, 0, 0, 0],
                                     data=[-1, 1, -1, 0])
        self.assertTrue(equiv1 or equiv2)
        self.assertAlmostEqual(sol1(Mdd), sol2(Mdd))
        self.assertAlmostEqual(sol1(Mdd), sol3(Mdd))
        self.assertAlmostEqual(sol2(Mdd), sol3(Mdd))
    def test_additive_constants(self):
        x = Variable('x')
        m = Model(1/x, [1 >= 5*x + 0.5, 1 >= 10*x])
        m.solve(verbosity=0)
        gp = m.program
        # both posynomial halves of the additive constraint share c and A
        self.assertEqual(gp.cs[1], gp.cs[2])
        self.assertEqual(gp.A.data[1], gp.A.data[2])
    def test_zeroing(self):
        # a signomial term multiplied by a zero-valued constant should vanish
        L = Variable("L")
        k = Variable("k", 0)
        with SignomialsEnabled():
            constr = [L-5*k <= 10]
        sol = Model(1/L, constr).solve(verbosity=0, solver=self.solver)
        self.assertAlmostEqual(sol(L), 10, self.ndig)
        self.assertAlmostEqual(sol["cost"], 0.1, self.ndig)
    def test_singular(self):
        """
        Create and solve GP with a singular A matrix
        """
        if self.solver == 'cvxopt':
            # cvxopt can't solve this problem
            # (see https://github.com/cvxopt/cvxopt/issues/36)
            return
        x = Variable('x')
        y = Variable('y')
        m = Model(y*x, [y*x >= 12])
        sol = m.solve(solver=self.solver, verbosity=0)
        self.assertAlmostEqual(sol["cost"], 12, self.ndig)
    def test_constants_in_objective_1(self):
        '''Issue 296'''
        x1 = Variable('x1')
        x2 = Variable('x2')
        m = Model(1.+ x1 + x2, [x1 >= 1., x2 >= 1.])
        sol = m.solve(solver=self.solver, verbosity=0)
        self.assertAlmostEqual(sol["cost"], 3, self.ndig)
    def test_constants_in_objective_2(self):
        '''Issue 296'''
        x1 = Variable('x1')
        x2 = Variable('x2')
        m = Model(x1**2 + 100 + 3*x2, [x1 >= 10., x2 >= 15.])
        sol = m.solve(solver=self.solver, verbosity=0)
        self.assertAlmostEqual(sol["cost"]/245., 1, self.ndig)
    def test_feasibility_gp_(self):
        # an infeasible GP should warn; its feasibility relaxations should
        # solve with cost >= 1 (constraints had to be loosened)
        x = Variable('x')
        m = Model(x, [x**2 >= 1, x <= 0.5])
        self.assertRaises(RuntimeWarning, m.solve, verbosity=0)
        fm = feasibility_model(m, "max")
        sol1 = fm.solve(verbosity=0)
        fm = feasibility_model(m, "product")
        sol2 = fm.solve(verbosity=0)
        self.assertTrue(sol1["cost"] >= 1)
        self.assertTrue(sol2["cost"] >= 1)
    def test_terminating_constant_(self):
        x = Variable('x')
        y = Variable('y', value=0.5)
        prob = Model(1/x, [x + y <= 4])
        sol = prob.solve(verbosity=0)
        self.assertAlmostEqual(sol["cost"], 1/3.5, self.ndig)
    def test_check_result(self):
        """issue 361"""
        # discretized beam-bending model used as a regression test
        N = 5
        L = 5.
        dx = L/(N-1)
        EI = Variable("EI",10)
        p = VectorVariable(N, "p")
        p = p.sub(p, 100*np.ones(N))
        V = VectorVariable(N, "V")
        M = VectorVariable(N, "M")
        th = VectorVariable(N, "th")
        w = VectorVariable(N, "w")
        eps = 1E-6
        substitutions = {var: eps for var in [V[-1], M[-1], th[0], w[0]]}
        objective = w[-1]
        constraints = [EI*V.left[1:N] >= EI*V[1:N] + 0.5*dx*p.left[1:N] + 0.5*dx*p[1:N],
                       EI*M.left[1:N] >= EI*M[1:N] + 0.5*dx*V.left[1:N] + 0.5*dx*V[1:N],
                       EI*th.right[0:N-1] >= EI*th[0:N-1] + 0.5*dx*M.right[0:N-1] + 0.5*dx*M[0:N-1],
                       EI*w.right[0:N-1] >= EI*w[0:N-1] + 0.5*dx*th.right[0:N-1] + 0.5*dx*th[0:N-1]]
        m = Model(objective, constraints, substitutions)
        sol = m.solve(verbosity=0)
    def test_exps_is_tuple(self):
        """issue 407"""
        x = Variable('x')
        m = Model(x, [x >= 1])
        m.solve(verbosity=0)
        self.assertEqual(type(m.program.cost.exps), tuple)
class TestSP(unittest.TestCase):
    """test case for SP class -- gets run for each installed solver"""
    name = "TestSP_"
    solver = None
    ndig = None
    def test_trivial_sp(self):
        # the same optimum should be found whether the signomial appears
        # as x >= 1-y or as x+y >= 1
        x = Variable('x')
        y = Variable('y')
        with SignomialsEnabled():
            m = Model(x, [x >= 1-y, y <= 0.1])
        sol = m.localsolve(verbosity=0, solver=self.solver)
        self.assertAlmostEqual(sol["variables"]["x"], 0.9, self.ndig)
        with SignomialsEnabled():
            m = Model(x, [x+y >= 1, y <= 0.1])
        sol = m.localsolve(verbosity=0, solver=self.solver)
        self.assertAlmostEqual(sol["variables"]["x"], 0.9, self.ndig)
    def test_relaxation(self):
        x = Variable("x")
        y = Variable("y")
        with SignomialsEnabled():
            constraints = [y + x >= 2, y <= x]
        objective = x
        m = Model(objective, constraints)
        m.localsolve(verbosity=0)
        # issue #257
        A = VectorVariable(2, "A")
        B = ArrayVariable([2, 2], "B")
        C = VectorVariable(2, "C")
        with SignomialsEnabled():
            constraints = [A <= B.dot(C),
                           B <= 1,
                           C <= 1]
        obj = 1/A[0] + 1/A[1]
        m = Model(obj, constraints)
        m.localsolve(verbosity=0)
    def test_issue180(self):
        # SP and equivalent GP formulations should reach the same cost
        L = Variable("L")
        Lmax = Variable("L_{max}", 10)
        W = Variable("W")
        Wmax = Variable("W_{max}", 10)
        A = Variable("A", 10)
        Obj = Variable("Obj")
        a_val = 0.01
        a = Variable("a", a_val)
        with SignomialsEnabled():
            eqns = [L <= Lmax,
                    W <= Wmax,
                    L*W >= A,
                    Obj >= a*(2*L + 2*W) + (1-a)*(12 * W**-1 * L**-3)]
        m = Model(Obj, eqns)
        spsol = m.solve(verbosity=0, solver=self.solver)
        # now solve as GP
        eqns[-1] = (Obj >= a_val*(2*L + 2*W) + (1-a_val)*(12 * W**-1 * L**-3))
        m = Model(Obj, eqns)
        gpsol = m.solve(verbosity=0, solver=self.solver)
        self.assertAlmostEqual(spsol['cost'], gpsol['cost'])
    def test_trivial_sp2(self):
        # the solution should not depend on which side the initial guess
        # approaches the optimum from
        x = Variable("x")
        y = Variable("y")
        # converging from above
        with SignomialsEnabled():
            constraints = [y + x >= 2, y >= x]
        objective = y
        x0 = 1
        y0 = 2
        m = Model(objective, constraints)
        sol1 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)
        # converging from right
        with SignomialsEnabled():
            constraints = [y + x >= 2, y <= x]
        objective = x
        x0 = 2
        y0 = 1
        m = Model(objective, constraints)
        sol2 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)
        self.assertAlmostEqual(sol1["variables"]["x"],
                               sol2["variables"]["x"], self.ndig)
        self.assertAlmostEqual(sol1["variables"]["y"],
                               sol2["variables"]["x"], self.ndig)
    def test_sp_initial_guess_sub(self):
        # x0 keys may be Variables, strings, or a mix of both
        x = Variable("x")
        y = Variable("y")
        x0 = 1
        y0 = 2
        with SignomialsEnabled():
            constraints = [y + x >= 2, y <= x]
        objective = x
        m = Model(objective, constraints)
        try:
            sol = m.localsolve(x0={x: x0, y: y0}, verbosity=0,
                               solver=self.solver)
        except TypeError:
            self.fail("Call to local solve with only variables failed")
        self.assertAlmostEqual(sol(x), 1, self.ndig)
        self.assertAlmostEqual(sol["cost"], 1, self.ndig)
        try:
            sol = m.localsolve(x0={"x": x0, "y": y0}, verbosity=0,
                               solver=self.solver)
        except TypeError:
            self.fail("Call to local solve with only variable strings failed")
        self.assertAlmostEqual(sol("x"), 1, self.ndig)
        self.assertAlmostEqual(sol["cost"], 1, self.ndig)
        try:
            sol = m.localsolve(x0={"x": x0, y: y0}, verbosity=0,
                               solver=self.solver)
        except TypeError:
            self.fail("Call to local solve with a mix of variable strings "
                      "and variables failed")
        self.assertAlmostEqual(sol["cost"], 1, self.ndig)
    def test_small_signomial(self):
        x = Variable('x')
        z = Variable('z')
        local_ndig = 4
        nonzero_adder = 0.1  # TODO: support reaching zero, issue #348
        with SignomialsEnabled():
            J = 0.01*(x - 1)**2 + nonzero_adder
            m = Model(z, [z >= J])
            sol = m.localsolve(verbosity=0)
        self.assertAlmostEqual(sol['cost'], nonzero_adder, local_ndig)
        self.assertAlmostEqual(sol('x'), 0.987, 3)
    def test_signomials_not_allowed_in_objective(self):
        with SignomialsEnabled():
            x = Variable('x')
            y = Variable('y')
            J = 0.01*((x - 1)**2 + (y - 1)**2) + (x*y - 1)**2
            m = Model(J)
            with self.assertRaises(TypeError):
                sol = m.localsolve(verbosity=0)
    def test_partial_sub_signomial(self):
        """Test SP partial x0 initialization"""
        x = Variable('x')
        y = Variable('y')
        with SignomialsEnabled():
            m = Model(x, [x + y >= 1, y <= 0.5])
        m.localsolve(x0={x: 0.5}, verbosity=0)
        self.assertEqual(m.program.gps[0].constraints[0].exp[x], -1./3)
# Build one concrete TestCase subclass per (test class, installed solver)
# pair -- e.g. TestGP_cvxopt -- so every solver is exercised independently.
TEST_CASES = [TestGP, TestSP]
TESTS = []
for base_case in TEST_CASES:
    for solver_name in filter(None, settings["installed_solvers"]):
        attrs = {"solver": solver_name, "ndig": NDIGS[solver_name]}
        TESTS.append(type(base_case.__name__ + "_" + solver_name,
                          (base_case,), attrs))
if __name__ == "__main__":
    # Run the per-solver test cases built above through gpkit's test helper.
    from gpkit.tests.helpers import run_tests
    run_tests(TESTS)
| galbramc/gpkit | gpkit/tests/t_model.py | Python | mit | 14,190 |
#!/usr/bin/env python
import sys
def convert_str(infile, outfile):
    r"""Convert a shader source file into a C string-literal include file.

    Each line of *infile* is written to *outfile* wrapped in double quotes
    with an escaped newline, e.g. ``void main() {`` becomes
    ``"void main() {\n"`` -- suitable for #include-ing into C/C++ code.
    Trailing whitespace on each line is stripped.

    Bug fix: file handles are now closed via context managers even if
    reading or writing raises (the originals were leaked on error).
    """
    with open(infile, 'r') as f:
        lines = f.readlines()
    with open(outfile, 'w') as f:
        f.writelines(['"%s\\n"\n' % line.rstrip() for line in lines])
def main():
    # Generate the .inc string-literal includes for both fountain shaders.
    convert_str('fountain.vert', 'fountain.vert.inc')
    convert_str('fountain.frag', 'fountain.frag.inc')
if __name__ == '__main__':
    main()
| fountainment/FountainEngineImproved | fountain/render/convert_shader.py | Python | mit | 396 |
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the package in the repository root importable for autodoc: docs/ sits
# one level below the package, so its parent directory is prepended to sys.path.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, parent)
import lensDES
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DESlens'
copyright = u'2015, ETH Zurich, Institute for Astronomy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version. Both are taken from the package itself so the docs
# never drift out of sync with the code.
version = lensDES.__version__
# The full version, including alpha/beta/rc tags.
release = lensDES.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. (The actual theme is selected in the try/except
# at the bottom of this file.)
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lensDESdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'lensDES.tex', u'DESlens Documentation',
   u'Simon Birrer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'lensDES', u'DESlens Documentation',
     [u'Simon Birrer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'lensDES', u'DESlens Documentation',
   u'Simon Birrer', 'lensDES', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Use the ETH theme when it is installed; otherwise fall back to Sphinx's
# built-in default theme. (An extraction artifact that had been fused onto
# the final line was removed -- it made the file a syntax error.)
try:
    import sphinx_eth_theme
    html_theme = "sphinx_eth_theme"
    html_theme_path = [sphinx_eth_theme.get_html_theme_path()]
except ImportError:
    html_theme = 'default'
"""Global variables for testing."""
from pathlib import Path
from calcipy.file_helpers import delete_dir, ensure_dir
from calcipy.log_helpers import activate_debug_logging
from recipes import __pkg_name__
# Configure debug logging for the package under test before any tests run.
activate_debug_logging(pkg_names=[__pkg_name__], clear_log=True)
TEST_DIR = Path(__file__).resolve().parent
"""Path to the `test` directory that contains this file and all other tests."""
TEST_DATA_DIR = TEST_DIR / 'data'
"""Path to subdirectory with test data within the Test Directory."""
TEST_TMP_CACHE = TEST_DIR / '_tmp_cache'
"""Path to the temporary cache folder in the Test directory."""
def clear_test_cache() -> None:
    """Delete the test cache directory, then recreate it empty."""
    delete_dir(TEST_TMP_CACHE)
    ensure_dir(TEST_TMP_CACHE)
| KyleKing/recipes | tests/configuration.py | Python | mit | 758 |
import os, glob
import operator
import h5py
import numpy as np
import matplotlib.pyplot as plt
def get_time_potential_charge_absrbd_on_anode_from_h5( filename ):
    """Read one simulation snapshot from an h5 file.

    Returns a tuple ``(time, potential, absorbed_charge)`` taken from the
    file's /TimeGrid and /InnerRegions/anode attributes. Tuple order
    matters: the script below sorts by item 1 (potential) and unpacks in
    this order.

    Bug fix: the file is opened with a context manager so the handle is
    closed even if an attribute lookup raises (the original leaked the
    handle on error). The dead commented-out dict return was removed.
    """
    with h5py.File( filename, mode="r") as h5:
        absorbed_charge = h5["/InnerRegions/anode"].attrs["total_absorbed_charge"][0]
        time = h5["/TimeGrid"].attrs["current_time"][0]
        potential = h5["/InnerRegions/anode"].attrs["potential"][0]
    return( (time,
             potential,
             absorbed_charge ) )
# Build a voltage-current curve for the diode: the charge absorbed on the
# anode between snapshot 0900 and snapshot 1000 (per potential value) gives
# the current at that anode voltage.
os.chdir("./")
# todo: remove hardcoding
prev_step_filename = "V*_*0900.h5"
last_step_filename = "V*_*1000.h5"
prev_step_vals = []
last_step_vals = []
for f in glob.glob( prev_step_filename ):
    prev_step_vals.append( get_time_potential_charge_absrbd_on_anode_from_h5( f ) )
for f in glob.glob( last_step_filename ):
    last_step_vals.append( get_time_potential_charge_absrbd_on_anode_from_h5( f ) )
# Sort both snapshot lists by potential (tuple item 1) so zip() below pairs
# the 0900 and 1000 snapshots of the same run.
prev_step_vals.sort( key = operator.itemgetter(1) )
last_step_vals.sort( key = operator.itemgetter(1) )
current = []
voltage = []
# conversion factor from CGS (statvolt) to volts; assumes potentials in the
# h5 files are stored in CGS units -- TODO confirm against the simulation.
cgs_to_v = 300
for (t1,V1,q1), (t2,V2,q2) in zip( prev_step_vals, last_step_vals ):
    print( t2 - t1, V2 - V1, q2 - q1 )
    current.append( abs( ( q2 - q1 ) ) / ( t2 - t1 ) )
    voltage.append( V1 * cgs_to_v )
#print( current, voltage )
#A,B = np.polyfit( np.ln( current ), voltage, 1 )
plt.figure()
axes = plt.gca()
axes.set_xlabel( "Voltage [V]" )
axes.set_ylabel( "Current [?]" )
#axes.set_xlim( [0, 1500] )
plt.plot( voltage, current,
          linestyle='', marker='o',
          label = "Num" )
#plt.plot( current_an, voltage_an,
#          label = "An" )
plt.legend()
plt.savefig('diode_VC.png')
| epicf/ef | examples/diode_childs_law/plot.py | Python | mit | 1,764 |
# Binary search; this problem should really be rated "easy".
class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int

        Classic binary search: return the index of ``target`` in the
        sorted list ``nums``, or the index at which it would be inserted
        to keep ``nums`` sorted.

        Bug fix: the original computed the midpoint with ``/``, which
        yields a float index under Python 3 (TypeError on nums[index]);
        ``//`` is used instead. The redundant ``start > len-1`` and
        ``end < 0`` guards are subsumed by returning ``lo`` once the
        search window closes (trace-identical results, including the
        original's handling of duplicates and the empty list).
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        # Window closed: lo is the insertion point.
        return lo
| zhangziang/MyLeetCodeAlgorithms | python/35-search_insert_position.py | Python | mit | 732 |
#!/usr/bin/env python
"""
|jedi| is mostly being tested by what I would call "Blackbox Tests". These
tests are just testing the interface and do input/output testing. This makes a
lot of sense for |jedi|. Jedi supports so many different code structures, that
it is just stupid to write 200'000 unittests in the manner of
``regression.py``. Also, it is impossible to do doctests/unittests on most of
the internal data structures. That's why |jedi| uses mostly these kind of
tests.
There are different kind of tests:
- completions / goto_definitions ``#?``
- goto_assignments: ``#!``
- usages: ``#<``
How to run tests?
+++++++++++++++++
Jedi uses pytest_ to run unit and integration tests. To run tests,
simply run ``py.test``. You can also use tox_ to run tests for
multiple Python versions.
.. _pytest: http://pytest.org
.. _tox: http://testrun.org/tox
Integration test cases are located in ``test/completion`` directory
and each test cases are indicated by the comment ``#?`` (completions /
definitions), ``#!`` (assignments) and ``#<`` (usages). There is also
support for third party libraries. In a normal test run they are not
being executed, you have to provide a ``--thirdparty`` option.
In addition to standard `-k` and `-m` options in py.test, you can use
`-T` (`--test-files`) option to specify integration test cases to run.
It takes the format of ``FILE_NAME[:LINE[,LINE[,...]]]`` where
``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is a line
number of the test comment. Here is some recipes:
Run tests only in ``basic.py`` and ``imports.py``::
py.test test/test_integration.py -T basic.py -T imports.py
Run test at line 4, 6, and 8 in ``basic.py``::
py.test test/test_integration.py -T basic.py:4,6,8
See ``py.test --help`` for more information.
If you want to debug a test, just use the ``--pdb`` option.
Alternate Test Runner
+++++++++++++++++++++
If you don't like the output of ``py.test``, there's an alternate test runner
that you can start by running ``./run.py``. The above example could be run by::
./run.py basic 4 6 8 50-80
The advantage of this runner is simplicity and more customized error reports.
Using both runners will help you to have a quicker overview of what's
happening.
Auto-Completion
+++++++++++++++
Uses comments to specify a test in the next line. The comment says, which
results are expected. The comment always begins with `#?`. The last row
symbolizes the cursor.
For example::
#? ['real']
a = 3; a.rea
Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
property.
Goto Definitions
++++++++++++++++
Definition tests use the same symbols like completion tests. This is
possible because the completion tests are defined with a list::
#? int()
ab = 3; ab
Goto Assignments
++++++++++++++++
Tests look like this::
abc = 1
#! ['abc=1']
abc
Additionally it is possible to add a number which describes to position of
the test (otherwise it's just end of line)::
#! 2 ['abc=1']
abc
Usages
++++++
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
"""
import os
import re
import sys
import operator
from ast import literal_eval
from io import StringIO
from functools import reduce
import jedi
from jedi._compatibility import unicode, is_py3
from jedi.parser import Parser, load_grammar
from jedi.api.classes import Definition
# Integer tags for the four kinds of integration tests parsed out of the
# ``#?`` / ``#!`` / ``#<`` comments described in the module docstring.
TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
class IntegrationTestCase(object):
    """One integration test parsed from a ``#?``/``#!``/``#<`` comment.

    NOTE(review): ``self.source`` (used by :meth:`script`) is never set in
    ``__init__`` -- it appears to be assigned externally by the test
    collector/runner before ``script()`` is called; confirm with callers.
    """
    def __init__(self, test_type, correct, line_nr, column, start, line,
                 path=None, skip=None):
        self.test_type = test_type
        self.correct = correct
        self.line_nr = line_nr
        self.column = column
        self.start = start
        self.line = line
        self.path = path
        self.skip = skip
    @property
    def module_name(self):
        # file name of the test case without directory or extension
        return os.path.splitext(os.path.basename(self.path))[0]
    @property
    def line_nr_test(self):
        """The test is always defined on the line before."""
        return self.line_nr - 1
    def __repr__(self):
        return '<%s: %s:%s:%s>' % (self.__class__.__name__, self.module_name,
                                   self.line_nr_test, self.line.rstrip())
    def script(self):
        # Build a fresh jedi Script positioned at this test's cursor.
        return jedi.Script(self.source, self.line_nr, self.column, self.path)
    def run(self, compare_cb):
        # Dispatch to the runner matching this case's test type.
        testers = {
            TEST_COMPLETIONS: self.run_completion,
            TEST_DEFINITIONS: self.run_goto_definitions,
            TEST_ASSIGNMENTS: self.run_goto_assignments,
            TEST_USAGES: self.run_usages,
        }
        return testers[self.test_type](compare_cb)
    def run_completion(self, compare_cb):
        completions = self.script().completions()
        #import cProfile; cProfile.run('script.completions()')
        comp_str = set([c.name for c in completions])
        # self.correct is a Python-literal list of expected completion names
        return compare_cb(self, comp_str, set(literal_eval(self.correct)))
    def run_goto_definitions(self, compare_cb):
        script = self.script()
        evaluator = script._evaluator
        def comparison(definition):
            # instances are rendered with a trailing '()' to distinguish
            # them from their classes
            suffix = '()' if definition.type == 'instance' else ''
            return definition.desc_with_module + suffix
        def definition(correct, correct_start, path):
            # Evaluate each space-separated expression from the test comment
            # in the scope of the test line to build the expected set.
            should_be = set()
            for match in re.finditer('(?:[^ ]+)', correct):
                string = match.group(0)
                parser = Parser(load_grammar(), string, start_symbol='eval_input')
                parser.position_modifier.line = self.line_nr
                element = parser.get_parsed_node()
                element.parent = jedi.api.completion.get_user_scope(
                    script._get_module(),
                    (self.line_nr, self.column)
                )
                results = evaluator.eval_element(element)
                if not results:
                    raise Exception('Could not resolve %s on line %s'
                                    % (match.string, self.line_nr - 1))
                should_be |= set(Definition(evaluator, r) for r in results)
            # Because the objects have different ids, `repr`, then compare.
            should = set(comparison(r) for r in should_be)
            return should
        should = definition(self.correct, self.start, script.path)
        result = script.goto_definitions()
        is_str = set(comparison(r) for r in result)
        return compare_cb(self, is_str, should)
    def run_goto_assignments(self, compare_cb):
        result = self.script().goto_assignments()
        comp_str = str(sorted(str(r.description) for r in result))
        return compare_cb(self, comp_str, self.correct)
    def run_usages(self, compare_cb):
        result = self.script().usages()
        self.correct = self.correct.strip()
        compare = sorted((r.module_name, r.line, r.column) for r in result)
        wanted = []
        if not self.correct:
            positions = []
        else:
            positions = literal_eval(self.correct)
        for pos_tup in positions:
            if type(pos_tup[0]) == str:
                # this means that there is a module specified
                wanted.append(pos_tup)
            else:
                # line numbers in the comment are relative to the test line
                line = pos_tup[0]
                if pos_tup[0] is not None:
                    line += self.line_nr
                wanted.append((self.module_name, line, pos_tup[1]))
        return compare_cb(self, compare, sorted(wanted))
def skip_python_version(line):
    """Return a skip reason if *line* restricts the Python version, else None.

    A test-file line such as ``# python >= 3.3`` limits the following tests
    to interpreters satisfying the comparison against ``sys.version_info``;
    ``None`` means "run the tests".

    Bug fix: the original comparison map was wrong for the strict
    inequalities -- ``'<'`` mapped to the nonexistent ``operator.gk``
    (AttributeError as soon as a ``# python < X`` line appeared) and
    ``'>'`` mapped to ``lt``, inverting the check. They now map to
    ``lt``/``gt`` as intended.
    """
    comp_map = {
        '==': 'eq',
        '<=': 'le',
        '>=': 'ge',
        '<': 'lt',
        '>': 'gt',
    }
    # check for python minimal version number
    match = re.match(r" *# *python *([<>]=?|==) *(\d+(?:\.\d+)?)$", line)
    if match:
        minimal_python_version = tuple(
            map(int, match.group(2).split(".")))
        operation = getattr(operator, comp_map[match.group(1)])
        if not operation(sys.version_info, minimal_python_version):
            return "Minimal python version %s %s" % (match.group(1), match.group(2))
    return None
def collect_file_tests(path, lines, lines_to_execute):
    """Yield IntegrationTestCase objects parsed from one test file.

    A ``#?``/``#!``/``#<`` comment on one line describes the test that is
    executed against the *next* line; the parser therefore carries state
    (``correct``, ``test_type``, ``start``) from one iteration to the next.
    ``lines_to_execute`` optionally restricts which test lines (ints or
    (start, end) tuples) are collected.
    """
    def makecase(t):
        return IntegrationTestCase(t, correct, line_nr, column,
                                   start, line, path=path, skip=skip)
    start = None
    correct = None
    test_type = None
    skip = None
    for line_nr, line in enumerate(lines, 1):
        if correct is not None:
            # Previous line held a test comment: this line is the test target.
            # An optional leading integer in `correct` overrides the column.
            r = re.match('^(\d+)\s*(.*)$', correct)
            if r:
                column = int(r.group(1))
                correct = r.group(2)
                start += r.regs[2][0]  # second group, start index
            else:
                column = len(line) - 1  # -1 for the \n
            if test_type == '!':
                yield makecase(TEST_ASSIGNMENTS)
            elif test_type == '<':
                yield makecase(TEST_USAGES)
            elif correct.startswith('['):
                yield makecase(TEST_COMPLETIONS)
            else:
                yield makecase(TEST_DEFINITIONS)
            correct = None
        else:
            skip = skip or skip_python_version(line)
            try:
                r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line)
                # test_type is ? for completion and ! for goto_assignments
                test_type = r.group(1)
                correct = r.group(2)
                # Quick hack to make everything work (not quite a bloody unicorn hack though).
                if correct == '':
                    correct = ' '
                start = r.start()
            except AttributeError:
                # no test comment on this line
                correct = None
            else:
                # Skip the test, if this is not specified test.
                for l in lines_to_execute:
                    if isinstance(l, tuple) and l[0] <= line_nr <= l[1] \
                            or line_nr == l:
                        break
                else:
                    if lines_to_execute:
                        correct = None
def collect_dir_tests(base_dir, test_files, check_thirdparty=False):
    """Yield test cases from every matching ``.py`` file in ``base_dir``.

    :param base_dir: Directory containing the completion test files.
    :param test_files: Dict mapping file-name prefixes to line selections
        (see :func:`collect_file_tests`); an empty dict selects all files.
    :param check_thirdparty: When True, mark cases as skipped if the
        third-party library a test file targets cannot be imported.
    """
    for f_name in os.listdir(base_dir):
        files_to_execute = [a for a in test_files.items() if f_name.startswith(a[0])]
        lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
        if f_name.endswith(".py") and (not test_files or files_to_execute):
            skip = None
            if check_thirdparty:
                lib = f_name.replace('_.py', '')
                try:
                    # there is always an underline at the end.
                    # It looks like: completion/thirdparty/pylab_.py
                    __import__(lib)
                except ImportError:
                    skip = 'Thirdparty-Library %s not found.' % lib
            path = os.path.join(base_dir, f_name)
            # BUG FIX: the file handles were previously never closed;
            # use context managers so they are released deterministically.
            if is_py3:
                with open(path, encoding='utf-8') as f:
                    source = f.read()
            else:
                with open(path) as f:
                    source = unicode(f.read(), 'UTF-8')
            for case in collect_file_tests(path, StringIO(source),
                                           lines_to_execute):
                case.source = source
                if skip:
                    case.skip = skip
                yield case
docoptstr = """
Using run.py to make debugging easier with integration tests.
An alternative testing format, which is much more hacky, but very nice to
work with.
Usage:
run.py [--pdb] [--debug] [--thirdparty] [<rest>...]
run.py --help
Options:
-h --help Show this screen.
--pdb Enable pdb debugging on fail.
-d, --debug Enable text output debugging (please install ``colorama``).
--thirdparty Also run thirdparty tests (in ``completion/thirdparty``).
"""
if __name__ == '__main__':
    import docopt
    arguments = docopt.docopt(docoptstr)
    import time
    t_start = time.time()
    # Sorry I didn't use argparse here. It's because argparse is not in the
    # stdlib in 2.5.
    import sys
    if arguments['--debug']:
        jedi.set_debug_function()
    # get test list, that should be executed
    # Positional args are parsed into {file_prefix: [line | (start, end), ...]}:
    # a bare name selects a file, following numbers/ranges select its lines.
    test_files = {}
    last = None
    for arg in arguments['<rest>']:
        match = re.match('(\d+)-(\d+)', arg)
        if match:
            start, end = match.groups()
            test_files[last].append((int(start), int(end)))
        elif arg.isdigit():
            if last is None:
                continue
            test_files[last].append(int(arg))
        else:
            test_files[arg] = []
            last = arg
    # completion tests:
    dir_ = os.path.dirname(os.path.realpath(__file__))
    completion_test_dir = os.path.join(dir_, '../test/completion')
    completion_test_dir = os.path.abspath(completion_test_dir)
    summary = []
    tests_fail = 0
    # execute tests
    cases = list(collect_dir_tests(completion_test_dir, test_files))
    if test_files or arguments['--thirdparty']:
        completion_test_dir += '/thirdparty'
        cases += collect_dir_tests(completion_test_dir, test_files, True)
    def file_change(current, tests, fails):
        # Print the per-file summary line when moving on to the next file.
        if current is not None:
            current = os.path.basename(current)
        print('%s \t\t %s tests and %s fails.' % (current, tests, fails))
    def report(case, actual, desired):
        # Comparison callback handed to IntegrationTestCase.run:
        # returns 1 on mismatch (counted as a failure), 0 otherwise.
        if actual == desired:
            return 0
        else:
            print("\ttest fail @%d, actual = %s, desired = %s"
                  % (case.line_nr - 1, actual, desired))
            return 1
    import traceback
    current = cases[0].path if cases else None
    count = fails = 0
    for c in cases:
        if c.skip:
            continue
        if current != c.path:
            file_change(current, count, fails)
            current = c.path
            count = fails = 0
        try:
            if c.run(report):
                tests_fail += 1
                fails += 1
        except Exception:
            # A crashing test counts as a failure but does not stop the run.
            traceback.print_exc()
            print("\ttest fail @%d" % (c.line_nr - 1))
            tests_fail += 1
            fails += 1
            if arguments['--pdb']:
                import pdb
                pdb.post_mortem()
        count += 1
    file_change(current, count, fails)
    print('\nSummary: (%s fails of %s tests) in %.3fs'
          % (tests_fail, len(cases), time.time() - t_start))
    for s in summary:
        print(s)
    # Non-zero exit code signals failure to CI.
    exit_code = 1 if tests_fail else 0
    sys.exit(exit_code)
| rfguri/vimfiles | bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/run.py | Python | mit | 14,446 |
from django.conf.urls import url
from fundraiser_app import views
# URL routes for the fundraiser app.  The `name` values are referenced by
# templates and reverse() calls elsewhere, and the regex patterns are part of
# the public URL surface — both must stay stable.
urlpatterns = [
    url(r'^$', views.FMItemListView.as_view(), name='fmitem_list'),
    url(r'^about/$', views.AboutView.as_view(), name='about'),
    # Detail/edit/remove/publish routes capture the item primary key as <pk>.
    url(r'^fmitem/(?P<pk>\d+)$', views.FMItemDetailView.as_view(), name='fmitem_detail'),
    url(r'^fmitem/new$', views.FMItemCreateView.as_view(), name='fmitem_new'),
    url(r'^fmitem/(?P<pk>\d+)/edit$', views.FMItemUpdateView.as_view(), name='fmitem_edit'),
    url(r'^fmitem/(?P<pk>\d+)/remove$', views.FMItemDeleteView.as_view(), name='fmitem_remove'),
    # NOTE(review): trailing-slash usage is inconsistent across these routes
    # (only publish/ and about/ end with '/'); confirm intended before changing.
    url(r'^fmitem/(?P<pk>\d+)/publish/$', views.fmitem_publish, name='fmitem_publish'),
]
| CarlGraff/fundraisermemorial | fundraiser_app/urls.py | Python | mit | 663 |
from django.http import HttpResponse
def hello(request):
    """Plain greeting view: always responds with the same static text."""
    greeting = "Hello world ! "
    return HttpResponse(greeting)
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
# NOTE(review): the settings module is spelled 'celcius' while the Celery app
# below is named 'celsius' — confirm which spelling matches the actual
# settings package before "fixing" either one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'celcius.settings')
app = Celery('celsius')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| cytex124/celsius-cloud-backend | src/celsius/celery.py | Python | mit | 623 |
import numbers
import numpy
import cupy
###############################################################################
# Private utility functions.
def _round_if_needed(arr, dtype):
    """Round ``arr`` in place when the destination ``dtype`` is integral."""
    if not cupy.issubdtype(dtype, cupy.integer):
        return
    # Comment kept from original: round workaround, see cupy/cupy#2330.
    arr.round(out=arr)
def _slice_at_axis(sl, axis):
"""Constructs a tuple of slices to slice an array in the given dimension.
Args:
sl(slice): The slice for the given dimension.
axis(int): The axis to which `sl` is applied. All other dimensions are
left "unsliced".
Returns:
tuple of slices: A tuple with slices matching `shape` in length.
"""
return (slice(None),) * axis + (sl,) + (Ellipsis,)
def _view_roi(array, original_area_slice, axis):
"""Gets a view of the current region of interest during iterative padding.
When padding multiple dimensions iteratively corner values are
unnecessarily overwritten multiple times. This function reduces the
working area for the first dimensions so that corners are excluded.
Args:
array(cupy.ndarray): The array with the region of interest.
original_area_slice(tuple of slices): Denotes the area with original
values of the unpadded array.
axis(int): The currently padded dimension assuming that `axis` is padded
before `axis` + 1.
Returns:
"""
axis += 1
sl = (slice(None),) * axis + original_area_slice[axis:]
return array[sl]
def _pad_simple(array, pad_width, fill_value=None):
    """Pads an array on all sides with either a constant or undefined values.
    Args:
        array(cupy.ndarray): Array to grow.
        pad_width(sequence of tuple[int, int]): Pad width on both sides for each
            dimension in `arr`.
        fill_value(scalar, optional): If provided the padded area is
            filled with this value, otherwise the pad area left undefined.
            (Default value = None)
    Returns:
        tuple: ``(padded, original_area_slice)`` — the grown array and a
        tuple of slices selecting the original values inside it.
    """
    # Allocate grown array
    new_shape = tuple(
        left + size + right
        for size, (left, right) in zip(array.shape, pad_width)
    )
    # Preserve the input's memory order in the output allocation.
    order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
    padded = cupy.empty(new_shape, dtype=array.dtype, order=order)
    if fill_value is not None:
        padded.fill(fill_value)
    # Copy old array into correct space
    original_area_slice = tuple(
        slice(left, left + size)
        for size, (left, right) in zip(array.shape, pad_width)
    )
    padded[original_area_slice] = array
    return padded, original_area_slice
def _set_pad_area(padded, axis, width_pair, value_pair):
"""Set an empty-padded area in given dimension.
"""
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
padded[left_slice] = value_pair[0]
right_slice = _slice_at_axis(
slice(padded.shape[axis] - width_pair[1], None), axis
)
padded[right_slice] = value_pair[1]
def _get_edges(padded, axis, width_pair):
"""Retrieves edge values from an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the edges are considered.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_index = width_pair[0]
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
left_edge = padded[left_slice]
right_index = padded.shape[axis] - width_pair[1]
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
right_edge = padded[right_slice]
return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """Constructs linear ramps for an empty-padded array along a given axis.
    Args:
        padded(cupy.ndarray): Empty-padded array.
        axis(int): Dimension in which the ramps are constructed.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
        end_value_pair((scalar, scalar)): End values for the linear ramps which
            form the edge of the fully padded array. These values are included in
            the linear ramps.
    Returns:
        tuple: ``(left_ramp, right_ramp)`` arrays interpolating from the end
        values toward the array edges along ``axis``.
    """
    edge_pair = _get_edges(padded, axis, width_pair)
    # endpoint=False keeps the edge value itself out of the ramp; the ramp
    # runs from the outer end value up to (but excluding) the edge.
    left_ramp = cupy.linspace(
        start=end_value_pair[0],
        # squeeze axis replaced by linspace
        stop=edge_pair[0].squeeze(axis),
        num=width_pair[0],
        endpoint=False,
        dtype=padded.dtype,
        axis=axis,
    )
    right_ramp = cupy.linspace(
        start=end_value_pair[1],
        # squeeze axis replaced by linspace
        stop=edge_pair[1].squeeze(axis),
        num=width_pair[1],
        endpoint=False,
        dtype=padded.dtype,
        axis=axis,
    )
    # Reverse linear space in appropriate dimension
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
    return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
    """Calculates a statistic for an empty-padded array along a given axis.
    Args:
        padded(cupy.ndarray): Empty-padded array.
        axis(int): Dimension in which the statistic is calculated.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
        length_pair(2-element sequence of None or int): Gives the number of
            values in valid area from each side that is taken into account when
            calculating the statistic. If None the entire valid area in `padded`
            is considered.
        stat_func(function): Function to compute statistic. The expected
            signature is
            ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
    Returns:
        tuple: ``(left_stat, right_stat)`` with the statistic of the valid
        values adjacent to each side, rounded for integral dtypes and shaped
        for broadcasting into the pad area.
    """
    # Calculate indices of the edges of the area with original values
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    # as well as its length
    max_length = right_index - left_index
    # Limit stat_lengths to max_length
    left_length, right_length = length_pair
    if left_length is None or max_length < left_length:
        left_length = max_length
    if right_length is None or max_length < right_length:
        right_length = max_length
    # Calculate statistic for the left side
    left_slice = _slice_at_axis(
        slice(left_index, left_index + left_length), axis
    )
    left_chunk = padded[left_slice]
    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
    _round_if_needed(left_stat, padded.dtype)
    if left_length == right_length == max_length:
        # return early as right_stat must be identical to left_stat
        return left_stat, left_stat
    # Calculate statistic for the right side
    right_slice = _slice_at_axis(
        slice(right_index - right_length, right_index), axis
    )
    right_chunk = padded[right_slice]
    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
    _round_if_needed(right_stat, padded.dtype)
    return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
    """Pads an `axis` of `arr` using reflection.
    Args:
        padded(cupy.ndarray): Input array of arbitrary shape.
        axis(int): Axis along which to pad `arr`.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
        method(str): Controls method of reflection; options are 'even' or 'odd'.
        include_edge(bool, optional): If true, edge value is included in
            reflection, otherwise the edge value forms the symmetric axis to the
            reflection. (Default value = False)
    Returns:
        tuple of int: The remaining ``(left_pad, right_pad)`` widths; non-zero
        when the pad area exceeds the original extent and the caller must
        iterate again.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad
    if include_edge:
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk
    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]
        if method == 'odd':
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk
        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length
    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]
        if method == 'odd':
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis
            )
            right_chunk = 2 * padded[edge_slice] - right_chunk
        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length
    return left_pad, right_pad
def _set_wrap_both(padded, axis, width_pair):
    """Pads an `axis` of `arr` with wrapped values.
    Args:
        padded(cupy.ndarray): Input array of arbitrary shape.
        axis(int): Axis along which to pad `arr`.
        width_pair((int, int)): Pair of widths that mark the pad area on both
            sides in the given dimension.
    Returns:
        tuple of int: The remaining ``(left_pad, right_pad)`` widths; non-zero
        when the valid area is shorter than the pad area and another
        iteration is needed.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad
    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0
    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area
        right_slice = _slice_at_axis(
            slice(
                -right_pad - min(period, left_pad),
                -right_pad if right_pad != 0 else None,
            ),
            axis,
        )
        right_chunk = padded[right_slice]
        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk
    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area
        left_slice = _slice_at_axis(
            slice(left_pad, left_pad + min(period, right_pad)), axis
        )
        left_chunk = padded[left_slice]
        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis
            )
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk
    return new_left_pad, new_right_pad
def _as_pairs(x, ndim, as_index=False):
"""Broadcasts `x` to an array with shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Args:
x(scalar or array-like, optional): The object to broadcast to the shape
(`ndim`, 2).
ndim(int): Number of pairs the broadcasted `x` will have.
as_index(bool, optional): If `x` is not None, try to round each
element of `x` to an integer (dtype `cupy.intp`) and ensure every
element is positive. (Default value = False)
Returns:
nested iterables, shape (`ndim`, 2): The broadcasted version of `x`.
"""
if x is None:
# Pass through None as a special case, otherwise cupy.round(x) fails
# with an AttributeError
return ((None, None),) * ndim
elif isinstance(x, numbers.Number):
if as_index:
x = round(x)
return ((x, x),) * ndim
x = numpy.array(x)
if as_index:
x = numpy.asarray(numpy.round(x), dtype=numpy.intp)
if x.ndim < 3:
# Optimization: Possibly use faster paths for cases where `x` has
# only 1 or 2 elements. `numpy.broadcast_to` could handle these as well
# but is currently slower
if x.size == 1:
# x was supplied as a single value
x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
if as_index and x < 0:
raise ValueError("index can't contain negative values")
return ((x[0], x[0]),) * ndim
if x.size == 2 and x.shape != (2, 1):
# x was supplied with a single value for each side
# but except case when each dimension has a single value
# which should be broadcasted to a pair,
# e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
x = x.ravel() # Ensure x[0], x[1] works
if as_index and (x[0] < 0 or x[1] < 0):
raise ValueError("index can't contain negative values")
return ((x[0], x[1]),) * ndim
if as_index and x.min() < 0:
raise ValueError("index can't contain negative values")
# Converting the array with `tolist` seems to improve performance
# when iterating and indexing the result (see usage in `pad`)
x_view = x.view()
x_view.shape = (ndim, 2)
return x_view.tolist()
# def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
# return (array,)
###############################################################################
# Public functions
# @array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
    """Pads an array with specified widths and values.
    Args:
        array(cupy.ndarray): The array to pad.
        pad_width(sequence, array_like or int): Number of values padded to the
            edges of each axis. ((before_1, after_1), ... (before_N, after_N))
            unique pad widths for each axis. ((before, after),) yields same
            before and after pad for each axis. (pad,) or int is a shortcut for
            before = after = pad width for all axes. You cannot specify
            ``cupy.ndarray``.
        mode(str or function, optional): One of the following string values or a
            user supplied function
            'constant' (default)
                Pads with a constant value.
            'edge'
                Pads with the edge values of array.
            'linear_ramp'
                Pads with the linear ramp between end_value and the array edge
                value.
            'maximum'
                Pads with the maximum value of all or part of the vector along
                each axis.
            'mean'
                Pads with the mean value of all or part of the vector along each
                axis.
            'median'
                Pads with the median value of all or part of the vector along
                each axis. (Not Implemented)
            'minimum'
                Pads with the minimum value of all or part of the vector along
                each axis.
            'reflect'
                Pads with the reflection of the vector mirrored on the first and
                last values of the vector along each axis.
            'symmetric'
                Pads with the reflection of the vector mirrored along the edge
                of the array.
            'wrap'
                Pads with the wrap of the vector along the axis. The first
                values are used to pad the end and the end values are used to
                pad the beginning.
            'empty'
                Pads with undefined values.
            <function>
                Padding function, see Notes.
        stat_length(sequence or int, optional): Used in 'maximum', 'mean',
            'median', and 'minimum'. Number of values at edge of each axis used
            to calculate the statistic value.
            ((before_1, after_1), ... (before_N, after_N)) unique statistic
            lengths for each axis. ((before, after),) yields same before and
            after statistic lengths for each axis. (stat_length,) or int is a
            shortcut for before = after = statistic length for all axes.
            Default is ``None``, to use the entire axis. You cannot specify
            ``cupy.ndarray``.
        constant_values(sequence or scalar, optional): Used in 'constant'. The
            values to set the padded values for each axis.
            ((before_1, after_1), ... (before_N, after_N)) unique pad constants
            for each axis.
            ((before, after),) yields same before and after constants for each
            axis.
            (constant,) or constant is a shortcut for before = after = constant
            for all axes.
            Default is 0. You cannot specify ``cupy.ndarray``.
        end_values(sequence or scalar, optional): Used in 'linear_ramp'. The
            values used for the ending value of the linear_ramp and that will
            form the edge of the padded array.
            ((before_1, after_1), ... (before_N, after_N)) unique end values
            for each axis.
            ((before, after),) yields same before and after end
            values for each axis.
            (constant,) or constant is a shortcut for before = after = constant
            for all axes.
            Default is 0. You cannot specify ``cupy.ndarray``.
        reflect_type({'even', 'odd'}, optional): Used in 'reflect', and
            'symmetric'. The 'even' style is the default with an unaltered
            reflection around the edge value. For the 'odd' style, the extended
            part of the array is created by subtracting the reflected values from
            two times the edge value.
    Returns:
        cupy.ndarray: Padded array with shape extended by ``pad_width``.
    .. note::
        For an array with rank greater than 1, some of the padding of later
        axes is calculated from padding of previous axes. This is easiest to
        think about with a rank 2 array where the corners of the padded array
        are calculated by using padded values from the first axis.
        The padding function, if used, should modify a rank 1 array in-place.
        It has the following signature:
        ``padding_func(vector, iaxis_pad_width, iaxis, kwargs)``
        where
        vector (cupy.ndarray)
            A rank 1 array already padded with zeros. Padded values are
            ``vector[:iaxis_pad_width[0]]`` and
            ``vector[-iaxis_pad_width[1]:]``.
        iaxis_pad_width (tuple)
            A 2-tuple of ints, ``iaxis_pad_width[0]`` represents the number of
            values padded at the beginning of vector where
            ``iaxis_pad_width[1]`` represents the number of values padded at
            the end of vector.
        iaxis (int)
            The axis currently being calculated.
        kwargs (dict)
            Any keyword arguments the function requires.
    Examples
    --------
    >>> a = cupy.array([1, 2, 3, 4, 5])
    >>> cupy.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, ..., 6, 6, 6])
    >>> cupy.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])
    >>> cupy.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])
    >>> cupy.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])
    >>> cupy.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])
    >>> a = cupy.array([[1, 2], [3, 4]])
    >>> cupy.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])
    >>> a = cupy.array([1, 2, 3, 4, 5])
    >>> cupy.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
    >>> cupy.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
    >>> cupy.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
    >>> cupy.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
    >>> cupy.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    >>> a = cupy.arange(6)
    >>> a = a.reshape((2, 3))
    >>> cupy.pad(a, 2, pad_with)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    >>> cupy.pad(a, 2, pad_with, padder=100)
    array([[100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100,   0,   1,   2, 100, 100],
           [100, 100,   3,   4,   5, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100]])
    """
    # Normalize pad_width to a sequence of (before, after) pairs per axis.
    if isinstance(pad_width, numbers.Integral):
        pad_width = ((pad_width, pad_width),) * array.ndim
    else:
        pad_width = numpy.asarray(pad_width)
        if not pad_width.dtype.kind == 'i':
            raise TypeError('`pad_width` must be of integral type.')
        # Broadcast to shape (array.ndim, 2)
        pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
    if callable(mode):
        # Old behavior: Use user-supplied function with numpy.apply_along_axis
        function = mode
        # Create a new zero padded array
        padded, _ = _pad_simple(array, pad_width, fill_value=0)
        # And apply along each axis
        for axis in range(padded.ndim):
            # Iterate using ndindex as in apply_along_axis, but assuming that
            # function operates inplace on the padded array.
            # view with the iteration axis at the end
            view = cupy.moveaxis(padded, axis, -1)
            # compute indices for the iteration axes, and append a trailing
            # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
            inds = numpy.ndindex(view.shape[:-1])
            inds = (ind + (Ellipsis,) for ind in inds)
            for ind in inds:
                function(view[ind], pad_width[axis], axis, kwargs)
        return padded
    # Make sure that no unsupported keywords were passed for the current mode
    allowed_kwargs = {
        'empty': [],
        'edge': [],
        'wrap': [],
        'constant': ['constant_values'],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        # 'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    try:
        unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
    except KeyError:
        raise ValueError("mode '{}' is not supported".format(mode))
    if unsupported_kwargs:
        raise ValueError(
            "unsupported keyword arguments for mode '{}': {}".format(
                mode, unsupported_kwargs
            )
        )
    if mode == 'constant':
        values = kwargs.get('constant_values', 0)
        if isinstance(values, numbers.Number) and values == 0 and (
                array.ndim == 1 or array.size < 4e6):
            # faster path for 1d arrays or small n-dimensional arrays
            return _pad_simple(array, pad_width, 0)[0]
    # Statistic modes share one code path; the mode name picks the reduction.
    stat_functions = {
        'maximum': cupy.max,
        'minimum': cupy.min,
        'mean': cupy.mean,
        # 'median': cupy.median,
    }
    # Create array with final shape and original values
    # (padded area is undefined)
    padded, original_area_slice = _pad_simple(array, pad_width)
    # And prepare iteration over all dimensions
    # (zipping may be more readable than using enumerate)
    axes = range(padded.ndim)
    if mode == 'constant':
        values = _as_pairs(values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, values):
            roi = _view_roi(padded, original_area_slice, axis)
            _set_pad_area(roi, axis, width_pair, value_pair)
    elif mode == 'empty':
        pass  # Do nothing as _pad_simple already returned the correct result
    elif array.size == 0:
        # Only modes 'constant' and 'empty' can extend empty axes, all other
        # modes depend on `array` not being empty
        # -> ensure every empty axis is only 'padded with 0'
        for axis, width_pair in zip(axes, pad_width):
            if array.shape[axis] == 0 and any(width_pair):
                raise ValueError(
                    "can't extend empty axis {} using modes other than "
                    "'constant' or 'empty'".format(axis)
                )
        # passed, don't need to do anything more as _pad_simple already
        # returned the correct result
    elif mode == 'edge':
        for axis, width_pair in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            edge_pair = _get_edges(roi, axis, width_pair)
            _set_pad_area(roi, axis, width_pair, edge_pair)
    elif mode == 'linear_ramp':
        end_values = kwargs.get('end_values', 0)
        end_values = _as_pairs(end_values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
            roi = _view_roi(padded, original_area_slice, axis)
            ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
            _set_pad_area(roi, axis, width_pair, ramp_pair)
    elif mode in stat_functions:
        func = stat_functions[mode]
        length = kwargs.get('stat_length', None)
        length = _as_pairs(length, padded.ndim, as_index=True)
        for axis, width_pair, length_pair in zip(axes, pad_width, length):
            roi = _view_roi(padded, original_area_slice, axis)
            stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
            _set_pad_area(roi, axis, width_pair, stat_pair)
    elif mode in {'reflect', 'symmetric'}:
        method = kwargs.get('reflect_type', 'even')
        include_edge = True if mode == 'symmetric' else False
        for axis, (left_index, right_index) in zip(axes, pad_width):
            if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                edge_pair = _get_edges(padded, axis, (left_index, right_index))
                _set_pad_area(
                    padded, axis, (left_index, right_index), edge_pair
                )
                continue
            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with reflected
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_reflect_both(
                    roi, axis, (left_index, right_index), method, include_edge
                )
    elif mode == 'wrap':
        for axis, (left_index, right_index) in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with wrapped
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_wrap_both(
                    roi, axis, (left_index, right_index)
                )
    return padded
| cupy/cupy | cupy/_padding/pad.py | Python | mit | 29,422 |
import sys
from pymongo import MongoClient
from werkzeug.utils import secure_filename
import os
import sys
# NOTE(review): this is a Python 2 script (bare `print` statements below) that
# connects to a live local MongoDB; it cannot run under Python 3 as-is.
client = MongoClient('mongodb://localhost:27017/')
db = client.ir
#li=[]
#color=open("AllColors.txt","r")
doc1=[]
doc2=[]
edgeConWT=[]
edgeElaWT=[]
edgeStart=[]
edgeEnd=[]
path="JVcode/Scripts/ForClassification/"
# For every *.tab.scores file: parse edges, sort them by elaboration weight
# (descending), and write the top-5 targets into the rPaper collection.
for file in os.listdir(path):
    edgeElaWT = []
    edgeConWT = []
    edgeStart = []
    edgeEnd = []
    print (file)
    if file.endswith(".tab.scores"):
        fdTemp=open(path+file,"r")
        #fdOut=open("output/new/elab-"+file,"w+")
        # Each line: "<start> <end> <connection_wt> <elaboration_wt>";
        # negative elaboration weights are clamped to 0.
        for i1 in fdTemp:
            line=i1.split(" ")
            #print line
            edgeStart.append(line[0])
            edgeEnd.append(line[1])
            edgeConWT.append(float(line[2]))
            if(float(line[3]))>0:
                edgeElaWT.append(float(line[3]))
            else:
                edgeElaWT.append(0.0)
        # Bubble sort by elaboration weight, descending; start/end lists are
        # swapped in lockstep.  NOTE(review): edgeConWT is NOT reordered, so
        # it falls out of sync with the edges — it is unused afterwards here,
        # but confirm before relying on it.
        for i in range(0,len(edgeElaWT)):
            for j in range(0, len(edgeElaWT)):
                if (j < (len(edgeConWT) - 1)):
                    if (edgeElaWT[j] < edgeElaWT[j + 1]):
                        temp = edgeElaWT[j]
                        edgeElaWT[j] = edgeElaWT[j + 1]
                        edgeElaWT[j + 1] = temp
                        temp2 = edgeStart[j]
                        edgeStart[j] = edgeStart[j + 1]
                        edgeStart[j + 1] = temp2
                        temp3 = edgeEnd[j]
                        edgeEnd[j] = edgeEnd[j + 1]
                        edgeEnd[j + 1] = temp3
        #print (edgeEnd,edgeElaWT)
        # Look up the top-5 targets in the papers collection.
        # NOTE(review): raises IndexError when a file has fewer than 5 edges,
        # and AttributeError if find_one returns None — confirm input files
        # always have >= 5 resolvable edges.
        t2 = []
        for k in range(0,5):
            results = db.papers.find_one({'filename': edgeEnd[k][:-3] + 'pdf'})
            print results
            h={}
            h['name'] = results['_id']
            h['domain']=results['domain']
            t2.append(h)
        print ("To update : ",t2)
        print("for => ",file)
        # Debug-only preview of the update; the real update follows below.
        str1="db.rPaper.update({'filename':"+ file[:-10]+"'pdf'}, {'$set': {'elaboration':}})"
        print(str1)
        results = db.rPaper.update({'filename': file[:-10]+'pdf'}, {'$set': {'elaboration': t2}})
        print (results)
print "DONE"
from .util import to_datetime, to_iso
from .http import request
from .exceptions import KloudlessException as KException
from . import config
import inspect
import json
import requests
import six
import warnings
class BaseResource(dict):
    """Base class for all Kloudless API resources.

    A resource is a dict of its API attributes (also exposed as object
    attributes) plus bookkeeping: its parent resource, a merged
    configuration, and the last state returned by the API so that saves
    can detect which fields changed.
    """
    # {'key': (serializer, deserializer)} -- converts these attributes
    # between ISO-8601 strings (wire format) and datetime objects.
    _serializers = {
        'created': (to_iso, to_datetime),
        'modified': (to_iso, to_datetime),
        'expiration': (to_iso, to_datetime),
        'expiry': (to_iso, to_datetime),
        'token_expiry': (to_iso, to_datetime),
        'refresh_token_expiry': (to_iso, to_datetime),
    }
    # URL path segment for this resource type (set by subclasses).
    _path_segment = None
    # Resource class this resource must be nested under, if any.
    _parent_resource_class = None
    # requests.Session's connection pool could cause failures due to the lack
    # of keep-alives causing the connection to drop unexpectedly.
    # Use `requests` to be safe, but alter if better performance is preferable.
    _api_session = requests
    def __init__(self, id=None, parent_resource=None, configuration=None):
        if not configuration:
            configuration = {}
        self._configuration = config.merge(configuration)
        self['id'] = id
        # Saved state, as returned by the Kloudless API.
        self._previous_data = {}
        # Keys that used to be present that no longer are post-save.
        # Useful for more helpful error messages.
        self._removed_keys = set()
        self._parent_resource = parent_resource
        if self._parent_resource_class is not None:
            if self._parent_resource is None:
                raise KException(
                    "A %s object or ID must be specified as this "
                    "%s object's parent." %
                    (self._parent_resource_class,
                     self.__class__.__name__))
    def populate(self, data):
        """Replace this resource's attributes with `data`.

        data: Response from Kloudless with data on this object.
        Deserializes known datetime fields, records keys that vanished
        (for better KeyError messages), and snapshots the new state.
        """
        removed = set(self.keys()) - set(data.keys())
        self._removed_keys |= removed
        id = self['id']
        self.clear()
        for k, v in data.items():
            if k in self._serializers:
                data[k] = self._serializers[k][1](v)
        # Bypass __setitem__ and recursively convert nested dicts/lists
        # into resource objects.
        for k, v in six.iteritems(data):
            super(BaseResource, self).__setitem__(
                k, self.__class__.create_from_data(
                    v, parent_resource=self._parent_resource,
                    configuration=self._configuration))
        if 'id' not in self:
            self['id'] = id
        # Update our state.
        self._previous_data = self.serialize(self)
    @classmethod
    def create_from_data(cls, data, parent_resource=None, configuration=None):
        """Recursively convert API JSON into resource objects.

        Lists map element-wise; dicts become the resource class named by
        their 'api'/'type' keys (falling back to `cls`); scalars pass
        through unchanged.
        """
        if isinstance(data, list):
            return [cls.create_from_data(
                d, parent_resource=parent_resource,
                configuration=configuration) for d in data]
        elif isinstance(data, dict) and not isinstance(data, BaseResource):
            data = data.copy()
            klass = cls
            data_type = None
            # Prefer the namespaced lookup, e.g. 'cal_events'.
            if data.get('api') and data.get('type'):
                data_type = data['api'] + '_' + data['type']
            if data_type in resources:
                klass = resources[data_type]
            elif data.get('type') in resources:
                klass = resources[data['type']]
            instance = klass(id=data.get('id'),
                             parent_resource=parent_resource,
                             configuration=configuration)
            instance.populate(data)
            return instance
        else:
            return data
    @classmethod
    def serialize(cls, resource_data):
        """
        Converts values in the BaseResource object into primitive types.
        This helps convert the entire object to JSON.
        resource_data: Either the resource object, or a dict with the data
            to populate the resource.
        """
        serialized = {}
        for k, v in six.iteritems(resource_data):
            if isinstance(v, BaseResource):
                serialized[k] = v.serialize(v)
            elif k in cls._serializers:
                serialized[k] = cls._serializers[k][0](v)
            else:
                serialized[k] = v
        return serialized
    @classmethod
    def list_path(cls, parent_resource):
        """Return the collection URL path; must be overridden."""
        raise NotImplementedError("Subclasses must implement list_path.")
    def detail_path(self):
        """Return the URL path identifying this specific resource."""
        if not self['id']:
            raise KException("The detail_path cannot be obtained since the ID "
                             "is unknown.")
        return "%s/%s" % (self.list_path(self._parent_resource), self['id'])
    # Getter/Setter methods: non-underscore attributes are stored as dict
    # items so attribute and item access stay in sync.
    def __setattr__(self, k, v):
        if k[0] == '_' or k in self.__dict__:
            return super(BaseResource, self).__setattr__(k, v)
        else:
            self[k] = v
    def __getattr__(self, k):
        if k[0] == '_':
            raise AttributeError(k)
        try:
            return self[k]
        except KeyError as e:
            raise AttributeError(*e.args)
    def __setitem__(self, k, v):
        super(BaseResource, self).__setitem__(k, v)
    def __getitem__(self, k):
        try:
            return super(BaseResource, self).__getitem__(k)
        except KeyError:
            # Give a more helpful message when the key disappeared after
            # the API refreshed this object's state.
            if k in self._removed_keys:
                raise KeyError(
                    "%r. The key %s was previously present but no longer is. "
                    "This is due to the object being updated with new "
                    "information returned from the Kloudless API, probably "
                    "due to the object being saved. Here are the current "
                    "attributes of this object: %s" %
                    (k, k, ', '.join(self.keys())))
            else:
                raise
    def __delitem__(self, k):
        raise TypeError(
            "Items cannot be deleted. Please set them to None instead if you "
            "wish to clear them.")
class AnnotatedList(list):
    """
    Given a deserialized response of all(), the objects returned by the API
    will be made iterable, and the other attributes will become attributes
    of this AnnotatedList object.

    If `all_data` is already a plain list, it is wrapped directly with no
    extra attributes attached.
    """
    def __init__(self, all_data):
        if isinstance(all_data, list):
            # Bug fix: __init__ must not return a value (doing so raises
            # TypeError). Initialize the underlying list with the data
            # instead of `return all_data`.
            list.__init__(self, all_data)
            return
        objects = None
        for k, v in six.iteritems(all_data):
            if k in ['objects', 'permissions', 'properties'] and isinstance(v, list):
                objects = v
            else:
                setattr(self, k, v)
        if objects is None:
            raise KException("No lists were found!")
        list.__init__(self, objects)
def allow_proxy(func):
    """Flag *func* as safe for ResourceProxy to forward calls through."""
    setattr(func, 'allow_proxy', True)
    return func
class ListMixin(object):
    """Adds all() for listing a resource collection."""
    @classmethod
    @allow_proxy
    def all(cls, parent_resource=None, configuration=None,
            headers=None, **params):
        """GET the collection and return it as an AnnotatedList.

        Extra keyword arguments are forwarded as query parameters.
        """
        response = request(cls._api_session.get,
                           cls.list_path(parent_resource),
                           configuration=configuration,
                           headers=headers, params=params)
        data = cls.create_from_data(
            response.json(), parent_resource=parent_resource,
            configuration=configuration)
        return AnnotatedList(data)
class RetrieveMixin(object):
    """Adds retrieve() and refresh() for fetching a single resource."""
    @classmethod
    @allow_proxy
    def retrieve(cls, id, parent_resource=None, configuration=None,
                 headers=None, **params):
        """GET the resource identified by `id` and return an instance."""
        instance = cls(id=id, parent_resource=parent_resource,
                       configuration=configuration)
        response = request(cls._api_session.get, instance.detail_path(),
                           configuration=configuration,
                           headers=headers, params=params)
        instance.populate(response.json())
        return instance
    def refresh(self, headers=None):
        """
        Retrieves and sets new metadata for the resource.
        """
        response = request(self._api_session.get, self.detail_path(),
                           configuration=self._configuration,
                           headers=headers)
        self.populate(response.json())
class ReadMixin(RetrieveMixin, ListMixin):
    """Convenience mixin combining retrieve()/refresh() and all()."""
    pass
class CreateMixin(object):
    """Adds create() for POSTing new resources to the collection."""
    @classmethod
    @allow_proxy
    def create(cls, data=None, params=None, method='post',
               parent_resource=None, configuration=None, headers=None):
        """Create one resource (or several, if `data` is a list/tuple).

        params: A dict containing query parameters.
        data: A dict containing data.
        method: HTTP verb name on the session ('post' by default;
            subclasses reuse this with 'put'/'patch').
        """
        method = getattr(cls._api_session, method)
        if not data:
            data = {}
        # A list/tuple means a batch create: serialize each item.
        if type(data) in [list, tuple]:
            data = [cls.serialize(data_obj) for data_obj in data]
        else:
            data = cls.serialize(data)
        if not params:
            params = {}
        response = request(method, cls.list_path(parent_resource),
                           configuration=configuration, headers=headers,
                           data=data, params=params)
        return cls.create_from_data(
            response.json(), parent_resource=parent_resource,
            configuration=configuration)
class UpdateMixin(object):
    """Adds save(), which PATCHes only the attributes that changed."""
    def _data_to_save(self, new_data):
        """
        Override this for any specific checks or additions to data.
        """
        return new_data
    def save(self, headers=None, **params):
        """PATCH changed attributes; return True if a request was made.

        Diffs the current serialized state against the snapshot taken at
        the last populate(); returns False when nothing changed.
        """
        data = self.serialize(self)
        new_data = {}
        for k, v in six.iteritems(data):
            if k not in self._previous_data or self._previous_data[k] != v:
                # Attribute is new or was updated
                new_data[k] = v
        new_data = self._data_to_save(new_data)
        if new_data:
            if self['id'] is None:
                if hasattr(self.__class__, 'create'):
                    raise KException("No ID provided. Use create() to create "
                                     "new resources instead.")
                else:
                    raise KException("No ID provided to identify the resource "
                                     "to update.")
            response = request(self._api_session.patch, self.detail_path(),
                               configuration=self._configuration,
                               headers=headers, data=new_data,
                               params=params)
            self.populate(response.json())
            # For some resources (eg: File/Folder), the parent resource could
            # be different. Check for that.
            # This assumes that if the metadata contains an 'account' key,
            # it maps to the correct Account ID. We update our parent
            # resource with the ID and it's metadata if it is different.
            res_type = resource_types[self.__class__]
            if (self._parent_resource and res_type in ['file', 'folder', 'link']):
                parent_res_type = resource_types[self._parent_resource_class]
                if (hasattr(self, parent_res_type) and
                    self._parent_resource.id != self[parent_res_type]):
                    self._parent_resource.id = self[parent_res_type]
                    self._parent_resource.refresh()
            return True
        return False
class DeleteMixin(object):
    """Adds delete() for removing the resource server-side."""
    def delete(self, headers=None, **params):
        """DELETE this resource, then clear its local attributes."""
        request(self._api_session.delete, self.detail_path(),
                configuration=self._configuration,
                headers=headers, params=params)
        self.populate({})
class CopyMixin(object):
    """Adds the shared implementation behind copy_file()/copy_folder()."""
    def _copy(self, headers=None, **data):
        """
        Copy the file/folder to another location.

        Keyword arguments form the request body; returns a new resource
        object built from the API response.
        """
        response = request(self._api_session.post,
                           "%s/copy" % self.detail_path(),
                           configuration=self._configuration,
                           headers=headers, data=data)
        return self.__class__.create_from_data(
            response.json(), parent_resource=self._parent_resource,
            configuration=self._configuration)
class WriteMixin(CreateMixin, UpdateMixin, DeleteMixin):
    """Convenience mixin combining create(), save(), and delete()."""
    pass
class ResourceProxy(object):
    """
    Create a proxy object. Whenever a function is called on it
    that is present on the underlying model, we attempt to call
    the underlying model. This is useful because resources can add in
    parameters like the parent_resource if it has not been specified yet.
    The Account resource does this.
    """
    def __init__(self, klass, parent_resource=None, configuration=None):
        self.klass = klass
        self.parent_resource = parent_resource
        self.configuration = configuration
    def __getattr__(self, name):
        """Forward attribute access to the proxied class.

        Methods decorated with @allow_proxy are wrapped so the proxy's
        parent_resource/configuration are injected into the call.
        """
        method = getattr(self.klass, name, None)
        def proxy_method(self, *args, **kwargs):
            self.update_kwargs(kwargs)
            return method(*args, **kwargs)
        if inspect.ismethod(method):
            if getattr(method, 'allow_proxy', False):
                # Bind the wrapper to this proxy instance.
                return proxy_method.__get__(self)
            else:
                return method
        else:
            raise AttributeError(name)
    def __call__(self, *args, **kwargs):
        """Instantiate the proxied class with defaults injected."""
        self.update_kwargs(kwargs)
        return self.klass(*args, **kwargs)
    def update_kwargs(self, kwargs):
        """Fill in parent_resource/configuration if the caller omitted them."""
        if 'parent_resource' not in kwargs:
            kwargs['parent_resource'] = self.parent_resource
        if 'configuration' not in kwargs:
            kwargs['configuration'] = self.configuration
class Proxy:
    """Mixin giving resources lazily-created, cached ResourceProxy objects."""
    def _get_proxy(self, resource_name):
        """Return (creating on first use) the proxy for `resource_name`."""
        if not getattr(self, '_proxies', None):
            setattr(self, '_proxies', {})
        resource = resources[resource_name]
        if self._proxies.get(resource_name) is None:
            self._proxies[resource_name] = ResourceProxy(
                resource, parent_resource=self,
                configuration=self._configuration)
        return self._proxies[resource_name]
class Account(BaseResource, ReadMixin, WriteMixin, Proxy):
    """A connected upstream service account; the parent of most resources."""
    def __init__(self, *args, **kwargs):
        super(Account, self).__init__(*args, **kwargs)
    @classmethod
    def list_path(cls, parent_resource):
        return 'accounts'
    @classmethod
    def serialize_account(cls, resource_data):
        """Serialize only the account fields the API accepts on update."""
        account_properties = ['active', 'account', 'service', 'token',
                              'token_secret', 'refresh_token', 'token_expiry',
                              'refresh_token_expiry']
        serialized = {}
        for k, v in six.iteritems(resource_data):
            if isinstance(v, BaseResource):
                serialized[k] = v.serialize_account(v)
            elif k not in account_properties:
                continue
            elif k in cls._serializers:
                serialized[k] = cls._serializers[k][0](v)
            else:
                serialized[k] = v
        return serialized
    def save(self, headers=None, **params):
        """PATCH the whitelisted account fields (not a diff-based save)."""
        # TODO: add in fields token, token_secret, refresh_token
        request(self._api_session.patch, self.detail_path(),
                configuration=self._configuration, headers=headers,
                data=self.serialize_account(self), params=params)
    def convert(self, headers=None, data=None, params=None):
        """Convert upstream raw IDs to Kloudless IDs."""
        # Deprecated in favor of encode_raw_id
        params = {} if params is None else params
        data = {} if data is None else data
        convert_path = "%s/%s" % (self.detail_path(), 'storage/convert_id')
        response = request(self._api_session.post, convert_path,
                           configuration=self._configuration,
                           headers=headers, data=data, params=params)
        return response.json()
    def encode_raw_id(self, data=None, params=None, headers=None):
        """Encode an upstream raw ID into a Kloudless resource ID."""
        path = "%s/encode_raw_id" % self.detail_path()
        return request(
            self._api_session.post, path, data=data or {}, params=params or {},
            headers=headers, configuration=self._configuration).json()
    def raw(self, raw_uri='', raw_method='GET', data=None, params=None,
            headers=None):
        """
        Make a pass-through request to the upstream service's own API.

        raw_uri: Upstream URI to make the pass-through API request to.
        raw_method: HTTP Method to make the pass-through request with.
        params: A dict containing query parameters.
        data: A dict containing data.
        """
        data = data or {}
        params = params or {}
        headers = headers or {}
        headers['X-Kloudless-Raw-URI'] = raw_uri
        headers['X-Kloudless-Raw-Method'] = raw_method
        return request(
            self._api_session.post, "%s/raw" % self.detail_path(), data=data,
            headers=headers, params=params, configuration=self._configuration)
    # Child-resource proxies: each returns a cached ResourceProxy bound to
    # this account (see Proxy._get_proxy).
    @property
    def links(self):
        return self._get_proxy('link')
    @property
    def files(self):
        return self._get_proxy('file')
    @property
    def folders(self):
        return self._get_proxy('folder')
    @property
    def search(self):
        return self._get_proxy('search')
    @property
    def recent(self):
        return self._get_proxy('recent')
    @property
    def calendars(self):
        return self._get_proxy('calendars')
    @property
    def events(self):
        return self._get_proxy('events')
    @property
    def multipart(self):
        return self._get_proxy('multipart')
    @property
    def users(self):
        return self._get_proxy('user')
    @property
    def groups(self):
        return self._get_proxy('group')
    @property
    def crm_objects(self):
        return self._get_proxy('crm_object')
    @property
    def crm_accounts(self):
        return self._get_proxy('crm_account')
    @property
    def crm_contacts(self):
        return self._get_proxy('crm_contact')
    @property
    def crm_leads(self):
        return self._get_proxy('crm_lead')
    @property
    def crm_opportunities(self):
        return self._get_proxy('crm_opportunity')
    @property
    def crm_campaigns(self):
        return self._get_proxy('crm_campaign')
    @property
    def crm_tasks(self):
        return self._get_proxy('crm_task')
    @property
    def crm_batch(self):
        return self._get_proxy('crm_batch')
    @property
    def crm_recent(self):
        # NOTE(review): 'crm_recent' is not a key in the module-level
        # `resources` dict, so accessing this property raises KeyError.
        return self._get_proxy('crm_recent')
    @property
    def crm_search(self):
        return self._get_proxy('crm_search')
    @property
    def crm_events(self):
        # NOTE(review): 'crm_events' is not a key in the module-level
        # `resources` dict, so accessing this property raises KeyError.
        return self._get_proxy('crm_events')
class AccountBaseResource(BaseResource):
    """Base for resources nested under an Account."""
    _parent_resource_class = Account
    def __init__(self, *accounts, **kwargs):
        """
        accounts should only be a list with 1 account in it.
        """
        if accounts:
            kwargs['parent_resource'] = accounts[0]
        super(AccountBaseResource, self).__init__(**kwargs)
    @classmethod
    def list_path(cls, account):
        """Collection path: <account path>/<segment>."""
        account_path = account.detail_path()
        return "%s/%s" % (account_path, cls._path_segment)
class FileSystem(BaseResource, Proxy):
    """Mixin for storage resources (files/folders) exposing permissions."""
    _path_segment = None
    @property
    def permissions(self):
        return self._get_proxy('permission')
class FileSystemBaseResource(BaseResource):
    """Base for resources nested under a file or folder."""
    _parent_resource_class = FileSystem
    def __init__(self, *files, **kwargs):
        # `files` should contain at most one parent file/folder.
        if files:
            kwargs['parent_resource'] = files[0]
        super(FileSystemBaseResource, self).__init__(**kwargs)
    @classmethod
    def list_path(cls, file):
        """Collection path: <file path>/<segment>."""
        file_path = file.detail_path()
        return "%s/%s" % (file_path, cls._path_segment)
class File(AccountBaseResource, RetrieveMixin, DeleteMixin, UpdateMixin,
           CopyMixin, FileSystem):
    """A file stored in the connected account."""
    _path_segment = 'storage/files'
    @property
    def properties(self):
        return self._get_proxy('property')
    @classmethod
    @allow_proxy
    def create(cls, file_name='', parent_id='root', file_data='', params=None,
               headers=None, parent_resource=None, configuration=None):
        """
        This handles file uploads.
        `file_data` can be either a string with file data in it or a
        file-like object.

        The file name and destination folder are passed via the
        X-Kloudless-Metadata header; the body is the raw file content.
        """
        all_headers = {
            'X-Kloudless-Metadata': json.dumps({
                'name': file_name,
                'parent_id': parent_id,
            }),
            'Content-Type': 'application/octet-stream',
        }
        all_headers.update(headers or {})
        response = request(cls._api_session.post, cls.list_path(parent_resource),
                           data=file_data, params=params, headers=all_headers,
                           configuration=configuration)
        return cls.create_from_data(
            response.json(), parent_resource=parent_resource,
            configuration=configuration)
    def update(self, file_data='', params=None, headers=None):
        """
        This overwites the file specified by 'file_id' with the contents of
        `file_data`.
        `file_data` can be either a string with file data in it or a
        file-like object.
        """
        headers = headers or {}
        headers.setdefault('Content-Type', 'application/octet-stream')
        response = request(self._api_session.put, self.detail_path(),
                           data=file_data, params=params, headers=headers,
                           configuration=self._configuration)
        self.populate(response.json())
        return True
    def contents(self, headers=None):
        """
        This handles file downloads. It returns a requests.Response object
        with contents:
        from contextlib import closing
        with closing(account.files(id=file_id).contents()) as r:
            # Do things with response here
            data = r.content
        For more information, see the documentation for requests.Response's
        Body content workflow.
        """
        # stream=True defers the body download until the caller reads it.
        response = request(self._api_session.get,
                           "%s/contents" % self.detail_path(),
                           configuration=self._configuration,
                           headers=headers, stream=True)
        return response
    def copy_file(self, headers=None, **data):
        """Copy this file; keyword args form the request body."""
        return self._copy(headers=headers, **data)
    @classmethod
    @allow_proxy
    def upload_url(cls, data=None, params=None,
                   parent_resource=None, configuration=None, headers=None):
        """Request a URL that a client can upload file data to directly."""
        upload_url_path = "%s/%s" % (cls.list_path(parent_resource), 'upload_url')
        response = request(cls._api_session.post, upload_url_path,
                           configuration=configuration, data=data or {},
                           params=params or {}, headers=headers)
        return response.json()
class Folder(AccountBaseResource, RetrieveMixin, DeleteMixin, UpdateMixin,
             CreateMixin, CopyMixin, FileSystem):
    """A folder in the connected account; defaults to the root folder."""
    _path_segment = 'storage/folders'
    def __init__(self, *args, **kwargs):
        # Default to the account's root folder when no ID is given.
        kwargs.setdefault('id', 'root')
        super(Folder, self).__init__(*args, **kwargs)
    def contents(self, headers=None):
        """List the folder's children as an AnnotatedList of resources."""
        response = request(self._api_session.get,
                           "%s/contents" % self.detail_path(),
                           configuration=self._configuration,
                           headers=headers)
        data = self.create_from_data(
            response.json(), parent_resource=self._parent_resource,
            configuration=self._configuration)
        return AnnotatedList(data)
    def copy_folder(self, headers=None, **data):
        """Copy this folder; keyword args form the request body."""
        return self._copy(headers=headers, **data)
class Link(AccountBaseResource, ReadMixin, WriteMixin):
    """A shareable link to a stored file."""
    _path_segment = 'storage/links'
class Search(AccountBaseResource, ListMixin):
    """Storage search results (list-only)."""
    _path_segment = 'storage/search'
class Recent(AccountBaseResource, ListMixin):
    """Recently modified files (list-only)."""
    _path_segment = 'storage/recent'
class Calendar(AccountBaseResource, ReadMixin, WriteMixin, Proxy):
    """A calendar in the connected account."""
    _path_segment = 'cal/calendars'
    @property
    def events(self):
        return self._get_proxy('calendar_events')
class CalendarEvents(Calendar):
    """Events nested under a calendar (path: .../calendars/<id>/events)."""
    _path_segment = 'events'
class Events(AccountBaseResource, ListMixin):
    """Activity events for the account, consumed with a cursor."""
    _path_segment = 'events'
    @classmethod
    @allow_proxy
    def latest_cursor(cls, parent_resource=None, configuration=None,
                      headers=None):
        """Return the newest event cursor (or the raw response dict)."""
        response = request(cls._api_session.get,
                           "%s/latest" % cls.list_path(parent_resource),
                           configuration=configuration, headers=headers)
        data = response.json()
        if 'cursor' in data:
            return data['cursor']
        else:
            return data
class Multipart(AccountBaseResource, RetrieveMixin, CreateMixin, DeleteMixin):
    """
    Multipart Uploads.
    Create the multipart upload first, prior to uploading chunks of data.
    Complete the upload once all chunks have been uploaded.
    """
    _path_segment = 'storage/multipart'
    def upload_chunk(self, part_number=None, data='',
                     parent_resource=None, configuration=None,
                     headers=None, **params):
        """
        This handles uploading chunks of the file, after a multipart upload has
        been initiated.
        `part_number`: 1-based index of this chunk within the upload.
        `data` can be either a string with file data in it or a
        file-like object.
        """
        params.update({'part_number': part_number})
        headers = headers or {}
        headers.setdefault('Content-Type', 'application/octet-stream')
        # NOTE(review): unlike sibling methods, this passes the
        # `configuration` parameter (default None) rather than
        # self._configuration -- confirm whether that is intentional.
        request(self._api_session.put, self.detail_path(),
                data=data, params=params, headers=headers,
                configuration=configuration)
        return True
    def complete(self, headers=None, **params):
        """
        Completes the multipart upload and returns a File object.
        """
        response = request(self._api_session.post,
                           "%s/complete" % self.detail_path(),
                           params=params, configuration=self._configuration,
                           headers=headers)
        return File.create_from_data(
            response.json(), parent_resource=self._parent_resource,
            configuration=self._configuration)
class Permission(FileSystemBaseResource, ListMixin, CreateMixin):
    """Access permissions attached to a file or folder."""
    _path_segment = 'permissions'
    @classmethod
    @allow_proxy
    def all(cls, parent_resource=None, configuration=None,
            headers=None, **params):
        """List permissions, tagging each entry so it deserializes here."""
        response = request(cls._api_session.get,
                           cls.list_path(parent_resource),
                           configuration=configuration, headers=headers,
                           params=params)
        response_json = response.json()
        permissions = response_json.get('permissions')
        # The API response lacks a 'type' field; inject one so
        # create_from_data resolves each entry to this class.
        for perm in permissions:
            perm['type'] = 'permission'
        response_json['permissions'] = permissions
        data = cls.create_from_data(
            response_json, parent_resource=parent_resource,
            configuration=configuration)
        return AnnotatedList(data)
    @classmethod
    @allow_proxy
    def create(cls, params=None, parent_resource=None, configuration=None,
               data=None, headers=None):
        """Replace the full permission set (HTTP PUT)."""
        return super(Permission, cls).create(params=params,
                                             parent_resource=parent_resource,
                                             configuration=configuration,
                                             method='put', data=data,
                                             headers=headers)
    @classmethod
    @allow_proxy
    def update(cls, params=None, parent_resource=None, configuration=None,
               data=None, headers=None):
        """Modify the permission set incrementally (HTTP PATCH)."""
        return super(Permission, cls).create(params=params,
                                             parent_resource=parent_resource,
                                             configuration=configuration,
                                             method='patch', data=data,
                                             headers=headers)
class Property(FileSystemBaseResource, ListMixin, CreateMixin):
    """Custom key/value properties attached to a file."""
    _path_segment = 'properties'
    @classmethod
    @allow_proxy
    def update(cls, parent_resource=None, configuration=None, headers=None,
               data=None, **params):
        """
        Updates custom properties associated with this file.
        'data' should be a list of dicts containing key/value pairs.
        """
        return super(Property, cls).create(params=params,
                                           parent_resource=parent_resource,
                                           configuration=configuration,
                                           method='patch', data=data,
                                           headers=headers)
    @classmethod
    @allow_proxy
    def delete_all(cls, parent_resource=None, configuration=None,
                   headers=None):
        """
        Deletes all custom properties associated with this file.
        """
        request(cls._api_session.delete, cls.list_path(parent_resource),
                configuration=configuration, headers=headers)
        return True
class User(AccountBaseResource, ReadMixin):
    """A team member of the connected (admin) account."""
    _path_segment = 'team/users'
    def get_groups(self, headers=None, **params):
        """List the groups this user is a member of."""
        response = request(self._api_session.get, "%s/%s" %
                           (self.detail_path(), "memberships"),
                           configuration=self._configuration,
                           headers=headers, params=params)
        data = Group.create_from_data(
            response.json(), parent_resource=self._parent_resource,
            configuration=self._configuration)
        return AnnotatedList(data)
class Group(AccountBaseResource, ReadMixin):
    """A team group of the connected (admin) account."""
    _path_segment = 'team/groups'
    def get_users(self, headers=None, **params):
        """List the users belonging to this group."""
        response = request(self._api_session.get, "%s/%s" %
                           (self.detail_path(), "members"),
                           configuration=self._configuration,
                           headers=headers, params=params)
        data = User.create_from_data(
            response.json(), parent_resource=self._parent_resource,
            configuration=self._configuration)
        return AnnotatedList(data)
class CRMObject(AccountBaseResource, ListMixin, CreateMixin, RetrieveMixin,
                UpdateMixin, DeleteMixin):
    """Generic CRM object; subclasses pin `raw_type` to a CRM record type.

    Every operation forwards `raw_type` as a query parameter when it is
    set, so subclasses operate on a single upstream record type.
    """
    _path_segment = 'crm/objects'
    # Upstream CRM record type (e.g. 'Lead'); None means untyped access.
    raw_type = None
    def __init__(self, *args, **kwargs):
        super(CRMObject, self).__init__(*args, **kwargs)
    @classmethod
    @allow_proxy
    def all(cls, parent_resource=None, configuration=None,
            headers=None, **params):
        if cls.raw_type is not None:
            params['raw_type'] = cls.raw_type
        return super(CRMObject, cls).all(parent_resource=parent_resource,
                                         configuration=configuration,
                                         headers=headers, **params)
    @classmethod
    @allow_proxy
    def create(cls, params=None, parent_resource=None, configuration=None,
               headers=None, method='post', data=None):
        params = {} if params is None else params
        if cls.raw_type is not None:
            params['raw_type'] = cls.raw_type
        return super(CRMObject, cls).create(params=params,
                                            parent_resource=parent_resource,
                                            configuration=configuration,
                                            headers=headers,
                                            method=method, data=data)
    @classmethod
    @allow_proxy
    def retrieve(cls, id, parent_resource=None, configuration=None,
                 headers=None, **params):
        if cls.raw_type is not None:
            params['raw_type'] = cls.raw_type
        return super(CRMObject, cls).retrieve(id,
                                              parent_resource=parent_resource,
                                              configuration=configuration,
                                              headers=headers,
                                              **params)
    def save(self, **params):
        # TODO: change serializer
        if self.raw_type is not None:
            params['raw_type'] = self.raw_type
        super(CRMObject, self).save(**params)
    def delete(self, **params):
        if self.raw_type is not None:
            params['raw_type'] = self.raw_type
        super(CRMObject, self).delete(**params)
# Typed CRM resources: each fixes the upstream record type via raw_type.
class CRMAccount(CRMObject):
    _path_segment = 'crm/accounts'
    raw_type = 'Account'
class CRMContact(CRMObject):
    _path_segment = 'crm/contacts'
    raw_type = 'Contact'
class CRMLead(CRMObject):
    _path_segment = 'crm/leads'
    raw_type = 'Lead'
class CRMOpportunity(CRMObject):
    _path_segment = 'crm/opportunities'
    raw_type = 'Opportunity'
class CRMCampaign(CRMObject):
    _path_segment = 'crm/campaigns'
    raw_type = 'Campaign'
class CRMTask(CRMObject):
    _path_segment = 'crm/tasks'
    raw_type = 'Task'
class CRMBatchRequest(AccountBaseResource, CreateMixin):
    """Batch several CRM requests into one API call (create-only)."""
    _path_segment = 'crm/batch'
class CRMSearch(AccountBaseResource, ListMixin):
    """CRM search results (list-only)."""
    _path_segment = 'crm/search'
class Application(BaseResource, ReadMixin, WriteMixin, Proxy):
    """A Kloudless application; parent of API keys and webhooks."""
    def __init__(self, *args, **kwargs):
        super(Application, self).__init__(*args, **kwargs)
    @classmethod
    def list_path(cls, parent_resource):
        return 'applications'
    @property
    def apikeys(self):
        return self._get_proxy('apikey')
    @property
    def webhooks(self):
        return self._get_proxy('webhook')
class ApplicationBaseResource(BaseResource):
    """Base for resources nested under an Application."""
    _parent_resource_class = Application
    def __init__(self, *applications, **kwargs):
        # `applications` should contain at most one parent Application.
        if applications:
            kwargs['parent_resource'] = applications[0]
        super(ApplicationBaseResource, self).__init__(**kwargs)
    @classmethod
    def list_path(cls, application):
        """Collection path: <application path>/<segment>."""
        application_path = application.detail_path()
        return "%s/%s" % (application_path, cls._path_segment)
class ApiKey(ApplicationBaseResource, ListMixin, CreateMixin, DeleteMixin):
    """An API key of an application; identified by 'key', not 'id'."""
    _path_segment = 'apikeys'
    def detail_path(self):
        if not self['key']:
            raise KException("The detail_path cannot be obtained since the key"
                             " is unknown.")
        return "%s/%s" % (self.list_path(self._parent_resource), self['key'])
class WebHook(ApplicationBaseResource, ListMixin, CreateMixin, RetrieveMixin,
              DeleteMixin):
    """A webhook registered on an application."""
    _path_segment = 'webhooks'
    def detail_path(self):
        if not self['id']:
            raise KException("The detail_path cannot be obtained since the id "
                             "is unknown.")
        return "%s/%s" % (self.list_path(self._parent_resource), self['id'])
# Maps API 'type' names (optionally prefixed by their 'api' namespace,
# e.g. 'cal_events') to resource classes; used by
# BaseResource.create_from_data and Proxy._get_proxy.
resources = {
    'account': Account,
    'file': File,
    'folder': Folder,
    'link': Link,
    'search': Search,
    'recent': Recent,
    'calendars': Calendar,
    'calendar_events': CalendarEvents,
    'events': Events,
    'multipart': Multipart,
    'permission': Permission,
    'property': Property,
    'user': User,
    'group': Group,
    # CRM Endpoint
    'crm_object': CRMObject,
    'crm_account': CRMAccount,
    'crm_contact': CRMContact,
    'crm_lead': CRMLead,
    'crm_opportunity': CRMOpportunity,
    'crm_campaign': CRMCampaign,
    'crm_task': CRMTask,
    'crm_batch': CRMBatchRequest,
    'crm_search': CRMSearch,
    # Application Endpoint
    'application': Application,
    'apikey': ApiKey,
    'webhook': WebHook,
}
# Reverse lookup: resource class -> type name (used by UpdateMixin.save).
resource_types = {v: k for k, v in six.iteritems(resources)}
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from .exceptions import TransactionValidationError
AVAIABLE_PAYMENT_METHOD = ['credit_card', 'boleto']


def validate_transaction(attrs):
    """Validate transaction attributes, raising on any problem.

    :param attrs: dict of transaction attributes. It must contain a
        positive ``amount`` and a ``payment_method`` listed in
        ``AVAIABLE_PAYMENT_METHOD``.
    :raises TransactionValidationError: with all failed checks joined
        into a single comma-separated message.
    :returns: None when the attributes are valid.
    """
    if not attrs:
        raise TransactionValidationError('Need a valid attr dict')
    errors = []
    # Missing amount defaults to 0, which fails the positivity check.
    if attrs.get('amount', 0) <= 0:
        errors.append('Need to define an amount')
    payment_method = attrs.get('payment_method')
    if payment_method is None:
        errors.append('Need to define an valid payment_method')
    elif payment_method not in AVAIABLE_PAYMENT_METHOD:
        errors.append(
            "invalid payment_method need be boleto or credit_card")
    if errors:
        raise TransactionValidationError(', '.join(errors))
class Transaction():
    """Accumulates transaction attributes and charges them via *requester*."""

    def __init__(self, requester):
        self.requester = requester
        self.attributes = {}

    def get_transactions(self, page=1):
        """Fetch one page of existing transactions from the API."""
        return self.requester.commit('/transactions', {'page': page}, 'GET')

    def build_transaction(self, transaction_attributes):
        """Merge *transaction_attributes* (a dict) into this transaction.

        :raises TransactionValidationError: if the argument is not a dict.
        """
        if not isinstance(transaction_attributes, dict):
            raise TransactionValidationError(
                'Transaction attributes need be an dict')
        self.attributes.update(transaction_attributes)

    def charge(self):
        """Validate the accumulated attributes before charging."""
        # Bug fix: was `self.validate_attrs` (attribute reference, no
        # call), so validation silently never ran.
        self.validate_attrs()

    def validate_attrs(self):
        """Run module-level validation on the current attributes."""
        validate_transaction(self.attributes)
| devton/pagarme-py | pagarme/transaction.py | Python | mit | 1,490 |
# taken from http://www.piware.de/2011/01/creating-an-https-server-in-python/
# generate server.pem with the following command:
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
# run as follows:
# python simple-https-server.py
# then in your browser, visit:
# https://localhost:4443
# NOTE: Python 2 only -- BaseHTTPServer/SimpleHTTPServer were merged into
# http.server in Python 3.
import BaseHTTPServer, SimpleHTTPServer
import ssl
# Serve the current working directory over HTTPS on localhost:4443.
httpd = BaseHTTPServer.HTTPServer(('localhost', 4443), SimpleHTTPServer.SimpleHTTPRequestHandler)
# NOTE(review): ssl.wrap_socket is deprecated in modern Python in favor of
# ssl.SSLContext.wrap_socket; kept as-is for this Python 2 script.
httpd.socket = ssl.wrap_socket (httpd.socket, certfile='./server.pem', server_side=True)
httpd.serve_forever()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class OperationsManagementClientConfiguration(Configuration):
    """Configuration for OperationsManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param provider_name: Provider name for the parent resource.
    :type provider_name: str
    :param resource_type: Resource type for the parent resource.
    :type resource_type: str
    :param resource_name: Parent resource name.
    :type resource_name: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        provider_name: str,
        resource_type: str,
        resource_name: str,
        **kwargs: Any
    ) -> None:
        # Validate every required argument up front; the message text matches
        # the generated-SDK convention exactly.
        for param_name, param_value in (
            ("credential", credential),
            ("subscription_id", subscription_id),
            ("provider_name", provider_name),
            ("resource_type", resource_type),
            ("resource_name", resource_name),
        ):
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))
        super(OperationsManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.provider_name = provider_name
        self.resource_type = resource_type
        self.resource_name = resource_name
        self.api_version = "2015-11-01-preview"
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-operationsmanagement/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Use the caller-supplied policy when present, otherwise build the
        # default one from kwargs.
        def pick(key, factory):
            return kwargs.get(key) or factory(**kwargs)

        self.user_agent_policy = pick('user_agent_policy', policies.UserAgentPolicy)
        self.headers_policy = pick('headers_policy', policies.HeadersPolicy)
        self.proxy_policy = pick('proxy_policy', policies.ProxyPolicy)
        self.logging_policy = pick('logging_policy', policies.NetworkTraceLoggingPolicy)
        self.http_logging_policy = pick('http_logging_policy', ARMHttpLoggingPolicy)
        self.retry_policy = pick('retry_policy', policies.AsyncRetryPolicy)
        self.custom_hook_policy = pick('custom_hook_policy', policies.CustomHookPolicy)
        self.redirect_policy = pick('redirect_policy', policies.AsyncRedirectPolicy)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/operationsmanagement/azure-mgmt-operationsmanagement/azure/mgmt/operationsmanagement/aio/_configuration.py | Python | mit | 4,167 |
import pdb
#!python
# -*- coding: utf-8 -*-
"""File: visual.py
Description:
This module contains visualizing method for geo data
History:
0.1.0 The first version.
"""
__version__ = '0.1.0'
__author__ = 'SpaceLis'
import dataset
from anatool.dm.db import GEOTWEET
import text_util
from operator import itemgetter
import time
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pymaps
import seaborn as sns
# Global seaborn/matplotlib styling applied once at import time:
# muted palette, white background, no grid, slightly larger fonts.
sns.set_palette("deep", desat=.6)
sns.set_style("white")
sns.set_context(font_scale=1.5, rc={"figure.figsize": (3, 2), 'axes.grid': False, 'axes.linewidth': 1,})
def cnt_poi(city, table='sample'):
    """Load the per-POI tweet counts for a city.

    Groups the tweets in `table` by place_id for the given superior (city)
    id and returns the resulting rows. The original implementation discarded
    the query result, which made the call useless; returning the rows is
    backward compatible (callers previously received None).

    :param city: superior_id of the city to aggregate over
    :param table: source table to query (default 'sample')
    :return: list of rows with 'place_id' and 'cnt' columns
    """
    twt_lst = dataset.loadrows(GEOTWEET, ('place_id', 'count(id) as cnt'),
            "superior_id='%s'" % (city), table, 'group by place_id')
    return twt_lst
def cnt_map(region, table = 'sample', draw = True):
    """Plot a hexbin density map of the tweets inside a rectangular region.

    :param region: two (lat, lng) corners passed through to dataset.geo_rect
    :param table: source table to query (default 'sample')
    :param draw: when True, render the hexbin plot; the coordinates are
        returned either way
    :return: (lat, lng) parallel lists of tweet coordinates
    """
    rows = dataset.loadrows(GEOTWEET, ('lat', 'lng'),
            ('MBRContains({0}, geo)'.format(dataset.geo_rect(*region)),), table)
    lat, lng = [], []
    for rec in rows:
        lat.append(rec['lat'])
        lng.append(rec['lng'])
    if draw:
        x = np.array(lng)
        y = np.array(lat)
        plt.hexbin(x, y, gridsize=200, cmap=cm.jet)
        plt.axis([x.min(), x.max(), y.min(), y.max()])
        plt.title("Hexagon binning")
        cb = plt.colorbar()
        cb.set_label('counts')
        plt.show()
    return lat, lng
def word_freq(twt_lst, unit = '', table = 'sample', draw = True):
    """Count word occurrences over a tweet list and plot the sorted frequencies.

    The counting unit is selected by `unit`:
      ''      -- raw term frequency accumulated via text_util.accum_dist
      'tweet' -- number of tweets a token appears in
      'poi'   -- number of distinct POIs (place_id) a token appears at
    Tokens occurring only once are dropped.

    :return: list of (token, count) pairs sorted by count, descending
    """
    counts = dict()
    for twt in twt_lst:
        token_dist = text_util.line2tf(text_util.fourq_filter(twt['text']))
        if unit == '':
            text_util.accum_dist(counts, token_dist)
        elif unit == 'tweet':
            for token in token_dist.iterkeys():
                counts[token] = counts.get(token, 0) + 1
        elif unit == 'poi':
            for token in token_dist.iterkeys():
                counts.setdefault(token, set()).add(twt['place_id'])
    # Keep only tokens seen more than once; for 'poi' the value becomes the
    # number of distinct places.
    filtered = dict()
    for token, val in counts.iteritems():
        if unit == 'poi':
            if len(val) > 1:
                filtered[token] = len(val)
        elif val > 1:
            filtered[token] = val
    sortdist = sorted(filtered.iteritems(), key=itemgetter(1), reverse=True)
    if draw:
        width = 0.35
        idx = np.arange(len(sortdist))
        plt.bar(idx, [val for key, val in sortdist])
        plt.ylabel('Freq')
        plt.title('Sorted Words Frequency')
        #plt.xticks(idx+width/2., [key for key, val in sortdist] )
        plt.show()
    return sortdist
def poi_freq(twt_lst, table = 'sample', draw = True):
    """Draw a bar chart of the distribution of tweets among pois"""
    # FIXME: unimplemented stub -- only allocates the accumulator and
    # implicitly returns None; the counting/plotting promised by the
    # docstring was never written.
    dist = dict()
def geo_map(dst, data_lst):
    """Generate a HTML file to use Google maps to display.

    Each item in `data_lst` must provide 'lat', 'lng' and 'text' keys; one
    marker with an info window is placed per item.

    :param dst: path of the HTML file to write
    :param data_lst: iterable of dict-like records to plot
    """
    gmap = pymaps.PyMap()
    gmap.maps[0].zoom = 5
    for item in data_lst:
        gmap.maps[0].setpoint([item['lat'], item['lng'], \
                '{0},{1}<br>{2}'.format(item['lat'], item['lng'], item['text'])])
    # Use a context manager so the handle is flushed and closed
    # deterministically (the original `open(dst,'wb').write(...)` leaked it).
    with open(dst, 'wb') as fout:
        fout.write(gmap.showhtml())
def top_poi100_map():
    """Render the 100 sampled Los Angeles POIs onto a Google-map HTML file."""
    places = dataset.loadrows(GEOTWEET, ('lat', 'lng', 'name'),
            ('superior_name=\'los angeles\'',), 'sample_dist_100')
    for place in places:
        place['text'] = place['name']
    geo_map('../la_100.html', places)
def region_dist(dst, region):
    """draw a map of region: one marker per POI labelled with its tweet count"""
    places = dataset.qloadrows(GEOTWEET, 'SELECT place_id, place.name, count(place_id) as cnt, place.lat, place.lng from sample left join place on sample.place_id = place.id where MBRContains({0}, place.geo) group by place_id'.format(text_util.geo_rect(*region)))
    for place in places:
        place['text'] = place['name'] + ',' + str(place['cnt'])
    geo_map(dst, places)
class id2int(object):
    """Maps arbitrary id strings to small sequential integers.

    The first id seen maps to 0, the next new one to 1, and so on; repeated
    ids always return the integer they were first assigned.
    """
    def __init__(self):
        # idstr -> assigned integer
        self.idlist = dict()

    def map(self, idstr):
        """Return the stable integer for idstr, allocating the next one on
        first sight."""
        # len() is evaluated before insertion, so a new key gets the current
        # size of the table -- exactly the next free integer.
        return self.idlist.setdefault(idstr, len(self.idlist))
def time_place_plot(user_id):
    """plot the time of each tweet in a day

    Scatter-plots, for one user, the fractional day-of-week of each tweet (x)
    against an integer id assigned per distinct place (y) so the user's
    time/place habits stand out.

    :param user_id: numeric twitter user id to query from the sample table
    """
    tims = list()
    plcs = list()
    idm = id2int()
    rows = dataset.loadrows(GEOTWEET, ('created_at', 'place_id'), ('user_id={0}'.format(user_id),), 'sample')
    for line in rows:
        # Skip rows with no timestamp; `is None` is the correct identity
        # test (the original used `== None`).
        if line['created_at'] is None:
            continue
        tim = time.strptime(str(line['created_at']), '%Y-%m-%d %H:%M:%S')
        plc = line['place_id']
        tims.append(tim.tm_wday + tim.tm_hour/24.0)
        plcs.append(idm.map(plc))
    x = np.array(tims)
    y = np.array(plcs)
    plt.plot(x, y, 'o')
    plt.title('User {0}'.format(user_id))
    plt.show()
def time_plot(place_id):
    """plot the time of each tweet in a day

    Histogram (42 bins over the week) of the fractional day-of-week of all
    tweets posted at one place.

    :param place_id: place id to query from the sample table
    """
    tims = list()
    rows = dataset.loadrows(GEOTWEET, ('created_at',), ('place_id=\'{0}\''.format(place_id),), 'sample')
    for line in rows:
        # `is None` instead of `== None`; also dropped the dead `plcs`/`idm`
        # locals the original never used.
        if line['created_at'] is None:
            continue
        tim = time.strptime(str(line['created_at']), '%Y-%m-%d %H:%M:%S')
        tims.append(tim.tm_wday + tim.tm_hour/24.0)
    x = np.array(tims)
    plt.hist(x, 42)
    plt.title('Place {0}'.format(place_id))
    plt.show()
if __name__ == '__main__':
    # Ad-hoc experiments kept for reference:
    #show_cnt_map(((40.67,-74.05),(40.75,-73.93)), 'tweet')
    #from analyze import sampling
    #dist = word_freq(sampling.sample_by_region(((40.75,-74.02),(40.70,-73.97))), 'poi')
    #for i in range(100):
    #print dist[i]
    #top_poi100_map()
    #region_dist('manhatton.html', ((40.75, -74.00), (40.745, -73.995)))
    #time_plot('../data/list/38062252_time.csv')
    # time_plot('ee858ad43eb4072e')
    # FIXME: cnt_poi() requires a `city` argument -- this call raises
    # TypeError at runtime as written.
    cnt_poi()
| spacelis/anatool | anatool/dm/visual.py | Python | mit | 6,377 |
import re
from threading import Thread
import time
from django.core.management.base import BaseCommand
import requests
from mittab.apps.tab.models import Round, TabSettings
from mittab.apps.tab.management.commands import utils
class Command(BaseCommand):
    help = "Load test the tournament, connecting via localhost and hitting the server"

    def add_arguments(self, parser):
        """Register the --host and --connections CLI options."""
        parser.add_argument(
            "--host",
            dest="host",
            help="The hostname of the server to hit",
            nargs="?",
            default="localhost:8000")
        parser.add_argument(
            "--connections",
            dest="connections",
            help="The number of concurrent connections to open",
            nargs="?",
            default=10,
            type=int)

    def handle(self, *args, **options):
        """Submit a random e-ballot for every undecided round of the current
        round number, `--connections` chair ballots at a time."""
        cur_round = TabSettings.get("cur_round") - 1
        host = options["host"]
        csrf_threads = []
        rounds = Round.objects.filter(round_number=cur_round, victor=Round.NONE)
        for round_obj in rounds:
            judge = round_obj.chair
            csrf_threads.append(GetCsrfThread(host, judge.ballot_code, round_obj))

        num_errors = 0
        while csrf_threads:
            # Take one batch of at most --connections workers.
            cur_csrf_threads = []
            for _ in range(min(len(csrf_threads), options["connections"])):
                cur_csrf_threads.append(csrf_threads.pop())

            for thr in cur_csrf_threads:
                thr.start()
            for thr in cur_csrf_threads:
                thr.join()

            result_threads = []
            for thr in cur_csrf_threads:
                # BUG FIX: the old code did `num_errors += num_errors` (a
                # no-op on zero) and then clobbered the running total with
                # `csrf_token, num_errors = thr.result`. Accumulate the
                # per-thread error count instead.
                csrf_token, thread_errors = thr.result
                num_errors += thread_errors
                if csrf_token is None:
                    print("no csrf token")
                result_thread = SubmitResultThread(
                    thr.host,
                    thr.ballot_code,
                    csrf_token,
                    thr.round_obj)
                result_threads.append(result_thread)

            for thr in result_threads:
                thr.start()
            for thr in result_threads:
                thr.join()
            for thr in result_threads:
                num_errors += thr.num_errors
            print("Done with one batch! Sleeping!")
            time.sleep(2)
        print("Done!")
        print("Total errors: %s" % num_errors)
class SubmitResultThread(Thread):
    """Worker that POSTs one randomly generated ballot result.

    A fresh random payload is generated on every attempt; any non-2xx
    response counts as an error and the worker gives up after MAX_ERRORS
    failures, leaving self.resp as None.
    """
    MAX_ERRORS = 10

    def __init__(self, host, ballot_code, csrf_token, round_obj):
        super(SubmitResultThread, self).__init__()
        self.host = host
        self.ballot_code = ballot_code
        self.csrf_token = csrf_token
        self.round_obj = round_obj
        self.num_errors = 0
        self.resp = None

    def run(self):
        self.resp = self.get_resp()

    def get_resp(self):
        """POST the ballot until it succeeds or the retry budget runs out."""
        while self.num_errors < self.MAX_ERRORS:
            payload = utils.generate_random_results(self.round_obj, self.ballot_code)
            payload["csrfmiddlewaretoken"] = self.csrf_token
            response = requests.post(
                "http://%s/e_ballots/%s/" % (self.host, self.ballot_code),
                payload,
                cookies={"csrftoken": self.csrf_token})
            if response.status_code > 299:
                self.num_errors += 1
            else:
                return response.text
        return None
class GetCsrfThread(Thread):
    """Worker that loads an e-ballot page and scrapes its CSRF token.

    After run() finishes, self.result holds a (token_or_None, error_count)
    tuple.
    """
    REGEX = "name=\"csrfmiddlewaretoken\" value=\"([^\"]+)\""
    MAX_ERRORS = 10

    def __init__(self, host, ballot_code, round_obj):
        super(GetCsrfThread, self).__init__()
        self.num_errors = 0
        self.host = host
        self.ballot_code = ballot_code
        self.round_obj = round_obj
        self.result = (None, None)

    def run(self):
        page = self.get_resp()
        if page is None:
            self.result = (None, self.num_errors)
        else:
            token = re.search(self.REGEX, page).group(1)
            self.result = (token, self.num_errors)

    def get_resp(self):
        """GET the ballot page until it succeeds or the retry budget runs out."""
        while self.num_errors < self.MAX_ERRORS:
            response = requests.get("http://%s/e_ballots/%s" % (self.host, self.ballot_code))
            if response.status_code > 299:
                self.num_errors += 1
            else:
                return response.text
        return None
| jolynch/mit-tab | mittab/apps/tab/management/commands/load_test.py | Python | mit | 4,345 |
# coding:utf-8
# Create your views here.
from django.shortcuts import render
from django.http import HttpResponse
from arrow_time import today_date_for_influxd_sql
from arrow_time import ten_day_ago_for_influxd_sql
from influxdb_function import influxDB_interface
from aircraft_config import AC_WQAR_CONFIG
import json
def home(request):
    """Render the landing page with today's sector index (if any) and the
    full aircraft list for the filter widget."""
    day = today_date_for_influxd_sql()
    all_aircraft_list = json.dumps(AC_WQAR_CONFIG().all_aircraft())
    # Window: 16:00 UTC of the previous day through 16:00 UTC today
    # (i.e. one local day offset by -8h/+16h).
    where_str = " WHERE time > '%s' - 8h AND time < '%s' + 16h" % (day, day)
    infdb_if = influxDB_interface()
    sector_index = infdb_if.inf_query("DB_sector_index", "*", "index", where_str)
    result_json = {}
    if sector_index != {}:
        result_json = sector_index['index'].to_json(orient="records")
    return render(request, 'home.html', {'result_json': result_json,
                                         'all_ac': all_aircraft_list})
def guide(request):
    """Render the static user-guide page."""
    return render(request, 'guide.html')
| waterwoodwind/influxDB_web | main_web/views.py | Python | mit | 1,205 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test txindex generation and fetching
#
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class TxIndexTest(BitcoinTestFramework):
    """Functional test for -txindex: a raw transaction broadcast by node 0
    must be retrievable verbosely from a txindex-enabled node that does not
    hold it in its wallet."""

    def setup_chain(self):
        # Start from a clean 4-node chain in the temp dir.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-txindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-txindex"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
        # Star topology centred on node 0 so blocks and txs relay everywhere.
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        print("Mining blocks...")
        # 105 blocks: enough for the first coinbases to mature (100-block rule).
        self.nodes[0].generate(105)
        self.sync_all()

        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)

        print("Testing transaction index...")

        # Hand-built P2PKH output. NOTE: privkey/address are unused here and
        # kept only as documentation of where addressHash comes from.
        privkey = "cU4zhap7nPJAWeMFu4j6jLrfPmqakDAzy8zn8Fhb3oEevdm4e5Lc"
        address = "yeMpGzMj3rhtnz48XsfpB8itPHhHtgxLc3"
        addressHash = binascii.unhexlify("C5E4FB9171C22409809A3E8047A29C83886E325D")
        scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        # Spend the whole first UTXO (amount converted to satoshis).
        amount = unspent[0]["amount"] * 100000000
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(amount, scriptPubKey)]
        tx.rehash()

        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()

        # Check verbose raw transaction results
        # (node 3 never handled this tx; only -txindex makes the lookup work).
        verbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
        assert_equal(verbose["vout"][0]["valueSat"], 5000000000);
        assert_equal(verbose["vout"][0]["value"], 50);

        print("Passed\n")
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    TxIndexTest().main()
| terracoin/terracoin | qa/rpc-tests/txindex.py | Python | mit | 2,703 |
#!/usr/bin/python
#By Rhys McCaig @mccaig / mccaig@gmail.com
import sys
import json
import logging
import re
from datetime import datetime, timedelta
import xml.etree.ElementTree as ET
import thread
import numpy
import requests
class NikePlus:
    """Class for working with the Nike+ API.

    Wraps NSL email/password authentication and the sport endpoints:
    aggregate totals, the paged activity list, and per-activity detail and
    GPS waypoints. Retrieved activity summaries are cached in
    self.activities keyed by the activity id as a string.
    """
    #Nike+ Settings
    #You might need to adjust these three below in the future to match your developer credentials. These work fine as of August 2013.
    nikeplus_app_id = 'ONEPLUSSDKSAMPLE'
    nikeplus_client_id = 'c002b5e3fd045be3bf357c8534edb38b'
    nikeplus_client_secret = 'd7c94a86a6a389c4'
    #Probably shouldnt touch anything below here, unless something drastically changes
    nikeplus_base_url = 'https://api.nike.com'
    nikeplus_headers = { 'Appid' : nikeplus_app_id,
                    'Accept' : 'application/json'}
    nikeplus_authentication_parameters = {'app' : nikeplus_app_id,
                    'client_id' : nikeplus_client_id,
                    'client_secret' : nikeplus_client_secret}
    nikeplus_endpoints = { 'aggregate_sports_data' : nikeplus_base_url+'/me/sport',
                    'list_activities' : nikeplus_base_url+'/me/sport/activities',
                    'activity_detail' : nikeplus_base_url+'/me/sport/activities/%(activity_id)s',
                    'gps_data' : nikeplus_base_url+'/me/sport/activities/%(activity_id)s/gps',
                    'login' : nikeplus_base_url+'/nsl/v2.0/user/login'}
    nikeplus_activity_list_limit = 50 #API shits itself if we set this too high
    nikeplus_timeout_seconds = 30

    def __init__(self):
        """Create an unauthenticated requests session with the API headers."""
        self.logger = logging.getLogger('nikeplus.NikePlus')
        self.id = id(self)
        self.logger.info('[{i}] {f}({m})'.format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Creating Instance"))
        self.cookie = None
        self.authentication_data = {}
        self.token = None
        self.session = requests.Session()
        self.session.headers.update(self.nikeplus_headers)
        self.aggregate_data = None
        self.activities = {}
        return

    #Authentication - Only implement Nike+ Login at this stage (email+password)
    def Authenticate(self, username=None, password=None):
        """Log in with a Nike+ email/password pair.

        On success stores the oAuth access token in self.token; on failure
        (bad credentials / unparsable response) self.token stays None.
        Returns self for chaining.
        """
        if username is None:
            self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="No Username"))
            raise NikePlusError("Cant Authenticate() without a Username")
        if password is None:
            self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="No Password"))
            raise NikePlusError("Cant Authenticate() without a Password")
        authentication_payload = {'email':username,'password':password}
        r = self.session.post(self.nikeplus_endpoints['login'], data=authentication_payload, params=self.nikeplus_authentication_parameters, headers=self.nikeplus_headers,timeout=self.nikeplus_timeout_seconds)
        self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Request: "+str([r.url,r.request.headers])))
        self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Response: "+str([r.headers,r.text])))
        try:
            # get_json_auth_data is a module-level helper defined elsewhere
            # in this file.
            self.authentication_data = get_json_auth_data(r)
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Setting Token: "+str(self.authentication_data['access_token'])))
            self.token = self.authentication_data['access_token']
        except:
            #If json parsing fails then it was probably a bad username/password combo
            self.logger.warn("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m=r.url+" "+str(sys.exc_info()[0])))
            # BUG FIX: the original set `self.Token` (capital T), creating a
            # stray attribute instead of clearing the real token.
            self.token = None
        return self
    #You can manually set an oAuth token here if you have one from a previous session

    #Get aggregate workout data
    def RetrieveAggregateData(self):
        """Fetch the account's aggregate sport totals into self.aggregate_data."""
        if self.token is None:
            self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="No Auth Token set"))
            raise NikePlusError("No Auth Token - Try to Authenticate() or manually set a NikePlus.token first")
        else:
            parameters = {'access_token':self.token}
            r = self.session.get(self.nikeplus_endpoints['aggregate_sports_data'], params=parameters, headers=self.nikeplus_headers,timeout=self.nikeplus_timeout_seconds)
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Request: "+str([r.url,r.request.headers])))
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Response: "+str([r.headers,r.text])))
            try:
                self.aggregate_data = r.json()
                self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Retrieved Data"))
            except:
                self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m=r.url+" "+str(sys.exc_info()[0])))
                raise
        return self

    #Retrieve activity list/summaries
    def RetrieveActivities(self,limit=100000,offset=1):
        """Page through the activity list, caching up to `limit` summaries
        in self.activities (keyed by str(activityId))."""
        count = self.nikeplus_activity_list_limit #Nike+ API shits itself if you try to request more than 500 or so, keeping this relatively low to be safe. We have to page through results.
        self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retrieving activities in batches of {c}".format(c=count)))
        activities = {}
        eof = False
        if self.token is None:
            self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="No Auth Token set"))
            raise NikePlusError("No Auth Token - Try to Authenticate() or manually set a NikePlus.token first")
        else:
            while (len(activities)<limit and not eof):
                parameters = {'access_token':self.token,'count':count,'offset':offset}
                r = self.session.get(self.nikeplus_endpoints['list_activities'], params=parameters, headers=self.nikeplus_headers,timeout=self.nikeplus_timeout_seconds)
                self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Request: "+str([r.url,r.request.headers])))
                self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Response: "+str([r.headers,r.text])))
                try:
                    data = r.json()
                    if "data" in data:
                        self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retreived {c} activities".format(c=len(data['data']))))
                        for a in data['data']:
                            if len(activities)<limit:
                                activities[str(a['activityId'])] = a #so we can search by activityId
                                activities[str(a['activityId'])]['gps'] = None
                        # A short or empty page means we have reached the end.
                        if len(data['data']) == 0 or len(activities) % count != 0:
                            eof = True
                    else:
                        eof = True
                    offset += count
                except:
                    self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m=r.url+" "+str(sys.exc_info()[0])))
                    raise
        self.activities = activities
        self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retreived a total of {c} activities".format(c=len(self.activities))))
        return self

    #Downloads details for an individual Activity
    def GetActivityDetails(self,activity_id):
        """Fetch detail and (if present) GPS data for one activity.

        Returns a populated NikePlusActivity; also marks 'detail'/'gps' on
        the cached summary entry.
        """
        if self.token is None:
            self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="No Auth Token set"))
            raise NikePlusError("No Auth Token - Try to Authenticate() or manually set a NikePlus.token first")
        else:
            self.activities.setdefault(str(activity_id),{}) #If not being invoked after a RetrieveActivities, (if manually setting the activity_id) activity_id wont yet be set
            parameters = {'access_token':self.token,'activityId':activity_id}
            #Note we have to replace activityID into the endpoint in the below code
            self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Request: "+ self.nikeplus_endpoints['activity_detail'] % {'activity_id' : activity_id}))
            r = self.session.get(self.nikeplus_endpoints['activity_detail'] % {'activity_id' : activity_id}, params=parameters, headers=self.nikeplus_headers,timeout=self.nikeplus_timeout_seconds)
            self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Response: "+ self.nikeplus_endpoints['activity_detail'] % {'activity_id' : activity_id}))
            self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Request: "+str([r.url,r.request.headers])))
            self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Response: "+str([r.headers,r.text])))
            npa = NikePlusActivity(activity_id)
            try:
                #Attach the detail to the existing activity
                if "errorCode" in r.json().keys() or "error_id" in r.json().keys():
                    self.logger.warn("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Error getting detail for activity id {a}".format(a=activity_id)))
                else:
                    npa.AddDetail(r.json())
                    self.activities[str(activity_id)]['detail'] = True
                    self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retrieved detail for activity id {a}".format(a=activity_id)))
            except:
                self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Failure to attach detail to activity id {a}. Error {e}".format(a=activity_id, e=str(sys.exc_info()[0]))))
                raise
            #Now to retrieve GPS (if there is any)
            parameters = {'access_token':self.token,'activityId':activity_id}
            #Note we have to replace activityID into the endpoint in the below code
            r = self.session.get(self.nikeplus_endpoints['gps_data'] % {'activity_id' : activity_id}, params=parameters, headers=self.nikeplus_headers,timeout=self.nikeplus_timeout_seconds)
            self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Request: "+str([r.url,r.request.headers])))
            self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Response: "+str([r.headers,r.text])))
            try:
                #Attach the detail to the existing activity
                if "errorCode" in r.json().keys() or "error_id" in r.json().keys():
                    self.activities[str(activity_id)]['gps'] = False
                    self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="No GPS data available for activity id {a}".format(a=activity_id)))
                else:
                    self.activities[str(activity_id)]['gps'] = True
                    self.logger.info("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retrieving GPS data for activity id {a}".format(a=activity_id)))
                    npa.AddGPS(r.json())
                npa.gps = self.activities[str(activity_id)]['gps']
            except:
                self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Failure to attach detail to activity " + r.url+" "+str(sys.exc_info()[0])))
                raise
        return npa

    #Download the details for all activities
    def GetBulkActivityDetails(self):
        """Download detail for every activity and return the list of
        NikePlusActivity objects."""
        self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Retrieving list of activities"))
        # BUG FIX: the original appended to `npalist` (undefined, NameError)
        # and called the nonexistent `RetrieveActivityDetails`; the fetcher
        # is GetActivityDetails.
        npaList = []
        for a in self.GetActivityIds():
            npaList.append(self.GetActivityDetails(a))
        return npaList

    #Get a list of available Activity ID's
    def GetActivityIds(self):
        """Return the cached activity ids, retrieving the list if needed."""
        if len(self.activities) == 0:
            self.RetrieveActivities()
        if len(self.activities) == 0:
            self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="No activities for this account"))
            raise NikePlusError("There are no activities for this account")
        self.logger.debug("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m=str(self.activities.keys())))
        return self.activities.keys()

    #Return the sumary data for an individual activity
    def GetActivitySummary(self,activity_id):
        """Return the cached summary dict for one activity id; raises
        NikePlusError when the id is unknown."""
        if len(self.activities) == 0:
            self.RetrieveActivities()
        if len(self.activities) == 0:
            self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="No activities for this account"))
            raise NikePlusError("There are no activities for this account")
        if activity_id not in self.activities.keys():
            self.logger.error("[{i}] [{t}] [{f}] ({m})".format(i=self.id, t=thread.get_ident(), f=str(sys._getframe().f_code.co_name), m="Activity ID {a} not found on this account".format(a=activity_id)))
            raise NikePlusError("Activity ID {a} not found!".format(a=activity_id))
        return self.activities[activity_id]
class NikePlusActivity:
"""Class for working with a single nikeplus activity data set"""
nikeplus_interpolatable_metrics = ['DISTANCE','SMOOTHED_DISTANCE']
def __init__(self, a_id=None):
self.logger = logging.getLogger('nikeplus.NikePlusActivity')
self.id = id(self)
self.logger.info('[{i}] {f}({m})'.format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Creating Instance for activity id {a}".format(a=a_id)))
self.activity_id = a_id
self.data = {}
self.data['timeSeries'] = {}
self.detail = False
self.gps = None
self.duration = 0
self.distance = 0
self.calories = 0
self.distance_smoothing_window = 1
self.start_datetime = None
return
#No Validation for now - we just blindly add whatever data elements that are passed
def AddDetail(self,d={}):
try:
for (k,v) in d.iteritems():
self.data[k] = v
self.activity_type = self.data['activityType']
self.start_time = self.data['startTime']
self.start_datetime = datetime.strptime(self.start_time, '%Y-%m-%dT%H:%M:%SZ')
self.duration = timestring_to_milliseconds(self.data['metricSummary']['duration'])
self.distance = self.data['metricSummary']['distance']
self.calories = self.data['metricSummary']['calories']
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Activity ID {a} has deviceType {d}".format(a=self.activity_id,d=self.data['deviceType'])))
self._SetSmoothingWindow()._AddSmoothedDistance()._AddDataToTimeSeries()._Interpolate()
except:
self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error adding detail to Activity ID {a}".format(a=self.activity_id)))
self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error {e}, Detail Data: {data}".format(e=sys.exc_info()[0],data=d)))
raise
self.detail = True
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Added detail for activity id {a}".format(a=self.activity_id)))
return self
#No Validation for now - we just blindly add whatever data elements that are passed
def AddGPS(self,g={}):
try:
self.data['gpsmetrics'] = [g] #Keeping it in the same format as metrics
self._AddGPSDataToTimeSeries()._Interpolate()
except:
self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error adding gps to Activity ID {a}".format(a=self.activity_id)))
self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error {e}, GPS Data: {data}".format(e=sys.exc_info()[0],data=g)))
raise
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Added gps for activity id {a}".format(a=self.activity_id)))
self.gps = True
return self
#Add timeseries data in MILLISECONDS
def _AddDataToTimeSeries(self):
try:
for m in self.data['metrics']:
if m['intervalUnit'] in ['SEC','MIN']: #Dont currently know how to handle it if we arent dealing in seconds
multiplier = 1000 #convert seconds to milliseconds
if m['intervalUnit'] == "MIN":
multiplier = 60000 #convert minutes to milliseconds
time = 0
for d in m['values']:
self.data['timeSeries'].setdefault(int(time), {})[m['metricType']] = d #really just self.data['timeSeries']['metric'] = d with setdefault
time += m['intervalMetric'] * 1000
else:
self.logger.warn("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Dont know how to handle metric ({metric}) with intervalUnit: ({i}) for activity id {a}".format(a=activity_id,i=m['intervalUnit'],metric=m['metricType'])))
#Add final time and distance
self.data['timeSeries'].setdefault(int(self.duration), {})['DISTANCE'] = str(self.distance)
self.data['timeSeries'].setdefault(int(self.duration), {})['SMOOTHED_DISTANCE'] = str(self.distance)
except:
self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error adding data to time series for activity id {a}".format(a=self.activity_id)))
self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error: {e}, self.data: {data}".format(e=sys.exc_info()[0],data=self.data)))
raise
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Added detail data to time series for activity id {a}".format(a=self.activity_id)))
return self
#Add timeseries data in MILLISECONDS
#Nike+ doesnt seem to return the real interval value for waypoints, so we'll assume they are uniform and calculate the intervals ourselves
def _AddGPSDataToTimeSeries(self):
try:
for m in self.data['gpsmetrics']:
for i in range(len(m['waypoints'])):
#add the waypoints to the time series (using setdefault to create the time entry)
self.data['timeSeries'].setdefault(int(self.duration * (float(i) / (len(m['waypoints']) - 1))), {})['WAYPOINT'] = m['waypoints'][i]
except:
self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error gps to time series for activity id {a}".format(a=self.activity_id)))
raise
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Added gps to time series for activity id {a}".format(a=self.activity_id)))
return self
#Dont know why i bothered with this. But sure, it returns a list of workout data time series keys
def GetTimeSeries(self):
return sorted(self.data['timeSeries'].keys())
#Return a the specific metric in [time,metric] format
def GetTimeSeriesDataByMetric(self,metric):
d = {}
for k in self.GetTimeSeries():
if metric in self.data['timeSeries'][k]:
d[k] = self.data['timeSeries'][k][metric]
return d
#Return a copy of the summary data
def GetMetricSummary(self):
return self.data['metricSummary'].copy()
#Returns true if the specified metric is available
def HasMetric(self,metric):
for m in self.data['metrics']:
if metric == m['metricType']:
return True
return False
    #Kinda private, as we always want to add this after adding detail, but thats about it.
    def _AddSmoothedDistance(self):
        """Append a SMOOTHED_DISTANCE metric derived from DISTANCE.

        Runs smooth_array (module-level helper, defined elsewhere in this
        file) over the float-cast distance samples using
        self.distance_smoothing_window, and stores the result back as
        unicode strings with the same interval settings as the source
        metric. Returns self for chaining. No-op warning when the activity
        has no DISTANCE metric.
        """
        try:
            smoothed = []
            if self.HasMetric("DISTANCE"):
                for m in self.data['metrics']:
                    if m['metricType'] == "DISTANCE":
                        #Distance array is unicode, cast it to float before smoothing, cast result back to unicode
                        smoothed = [unicode(f) for f in smooth_array([float(u) for u in m['values']],self.distance_smoothing_window)]
                        self.data['metrics'].append({ 'metricType': 'SMOOTHED_DISTANCE',
                                        'intervalMetric': m['intervalMetric'],
                                        'intervalUnit': m['intervalUnit'],
                                        'values': smoothed
                                        })
                        self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Added SMOOTHED_DISTANCE metric for activity id {a}".format(a=self.activity_id)))
            else:
                self.logger.warn("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Activity {a} has no distance metric!".format(a=self.activity_id)))
        except: #What could possibly go wrong?
            self.logger.error("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Error adding smoothed distance for activity id {a}".format(a=self.activity_id)))
            raise
        return self
#Figure out what sliding window size we want to use for smoothing distance metrics (for low res treadmills)
def _SetSmoothingWindow(self):
s = 0
decimals = 0
try:
for m in self.data['metrics']:
if m['metricType'] == "DISTANCE":
for d in m['values']:
if len(d.split('.')[1]) > decimals:
decimals = len(d.split('.')[1])
except:
self.logger.warn("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Exception in _SetSmoothingWindow for activity id {a}".format(a=self.activity_id)))
pass
if decimals == 0:
self.distance_smoothing_window = 19
elif decimals == 1:
self.distance_smoothing_window = 13
elif decimals == 2:
self.distance_smoothing_window = 7
elif decimals == 3:
self.distance_smoothing_window = 3
else:
self.distance_smoothing_window = 1
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Set a smoothing window of {w} for activity id {a}".format(w=self.distance_smoothing_window,a=self.activity_id)))
return self
#Some tools fall over if there isnt a distance supplied in every TCX entry. So we need to interpolate the distance if our other metrics have a different interval to Distance
#Note we use floats here as required by numpy
    def _Interpolate(self):
        """Linearly interpolate sparse metrics onto every time-series offset.

        Some tools require a value (e.g. distance) at every trackpoint, so each
        metric listed in self.nikeplus_interpolatable_metrics is interpolated
        with numpy.interp onto the full set of time-series keys and written
        back into self.data['timeSeries'].

        Returns:
            self, to allow call chaining.
        """
        try:
            for metric in self.nikeplus_interpolatable_metrics:
                dataset = self.GetTimeSeriesDataByMetric(metric)
                #Get an array of the x values (time) of our data points for interpolation
                x_points = [float(u) for u in sorted(dataset.keys())]
                #Get an array of the y values (distance) of our data points for interpolation
                y_points = [dataset[u] for u in sorted(dataset.keys())]
                #Get an array of the x coordinates we want to interpolate onto (all of the time series)
                x_coords = [float(u) for u in self.GetTimeSeries()]
                #interpolate onto y values
                y_coords = numpy.interp(x_coords,x_points,y_points)
                #Update our interpolated values into our time series array
                #(unicode() is Python 2 only; this module targets Python 2)
                for i in range(len(x_coords)):
                    self.data['timeSeries'][int(x_coords[i])][metric] = unicode(y_coords[i])
        except:
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Something went horribly wrong during interpolation for activity id {a}".format(a=self.activity_id)))
            raise
        self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Interpolation Success for activity id {a}".format(a=self.activity_id)))
        return self
#Spitting out our data as JSON is pretty easy, especially if we dont want to do any special formatting.
def AsJSON(self):
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="JSON export for activity id {a}".format(a=self.activity_id)))
json_out = json.dumps(self.data,sort_keys=True,indent=2,separators=(',', ': '))
return json_out
#Generate GPX file
    def AsGPX(self):
        """Export the activity as a GPX 1.1 XML string.

        Emits one <trkpt> per time-series entry that has a WAYPOINT, with
        elevation, an ISO-8601 UTC timestamp, and (when present) heart rate in
        a Garmin TrackPointExtension.

        Returns:
            str: the GPX document, prefixed with an XML declaration.
        """
        try:
            GPX = ET.Element('gpx')
            GPX.set('xmlns','http://www.topografix.com/GPX/1/1')
            GPX.set('xmlns:tc2','http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2')
            GPX.set('xmlns:xsi','http://www.w3.org/2001/XMLSchema-instance')
            GPX.set('xmlns:tp1','http://www.garmin.com/xmlschemas/TrackPointExtension/v1')
            GPX.set('version','1.1')
            GPX.set('creator','Nike+ to GPX exporter (@mccaig)')
            GPX.set('xsi:schemaLocation','http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd')
            GPXtrk = ET.SubElement(GPX,'trk')
            ET.SubElement(GPXtrk,'name').text = str(self.start_time)
            GPXtrkseg = ET.SubElement(GPXtrk,'trkseg')
            for i in self.GetTimeSeries():
                time_slice = self.data['timeSeries'][i]
                if "WAYPOINT" in time_slice: #We need a waypoint for a record
                    GPXtrkpt = ET.SubElement(GPXtrkseg,'trkpt')
                    GPXtrkpt.set('lat',str(time_slice["WAYPOINT"]["latitude"]))
                    GPXtrkpt.set('lon',str(time_slice["WAYPOINT"]["longitude"]))
                    ET.SubElement(GPXtrkpt,'ele').text = str(time_slice["WAYPOINT"]['elevation'])
                    #Offset each point from the activity start; "Z" marks UTC
                    ET.SubElement(GPXtrkpt,'time').text = (self.start_datetime + timedelta(milliseconds=int(i))).isoformat() + "Z"
                    GPXextensions = ET.SubElement(GPXtrkpt,'extensions')
                    if "HEARTRATE" in time_slice:
                        GPXHR = ET.SubElement(GPXextensions,'tp1:TrackPointExtension')
                        ET.SubElement(GPXHR,'tp1:hr').text = time_slice["HEARTRATE"]
            gpx_out = '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n' + ET.tostring(GPX)
        except:
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="GPX generation failed for activity id {a}".format(a=self.activity_id)))
            raise
        self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Exported GPX for activity id {a} [{c} characters]".format(a=self.activity_id,c=len(gpx_out))))
        return gpx_out
#Generate TCX File
    def AsTCX(self):
        """Export the activity as a Garmin TCX (TrainingCenterDatabase v2) XML string.

        Builds a single Activity with a single manually-triggered Lap, one
        <Trackpoint> per time-series offset, and optional position, smoothed
        distance, heart rate, speed and cadence extensions.

        Returns:
            str: the TCX document, prefixed with an XML declaration.
        """
        try:
            TCX = ET.Element('TrainingCenterDatabase')
            TCX.set('xmlns','http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2')
            TCX.set('xmlns:xsi','http://www.w3.org/2001/XMLSchema-instance')
            TCX.set('xsi:schemaLocation','http://www.garmin.com/xmlschemas/ActivityExtension/v2 http://www.garmin.com/xmlschemas/ActivityExtensionv2.xsd http://www.garmin.com/xmlschemas/FatCalories/v1 http://www.garmin.com/xmlschemas/fatcalorieextensionv1.xsd http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd')
            ET.SubElement(TCX,'Folders')
            TCXActivities = ET.SubElement(TCX,'Activities')
            TCXActivity = ET.SubElement(TCXActivities,'Activity')
            TCXActivity.set('Sport',self._GetGarminSport())
            ET.SubElement(TCXActivity,'Id').text = str(self.start_time)
            TCXLap = ET.SubElement(TCXActivity,'Lap')
            TCXLap.set('StartTime',self.start_time)
            #NOTE(review): under Python 2, self.duration / 1000 truncates if
            #duration is an int — presumably intentional; confirm units.
            ET.SubElement(TCXLap,'TotalTimeSeconds').text = str(self.duration / 1000)
            ET.SubElement(TCXLap,'DistanceMeters').text = str(self.distance * 1000)
            ET.SubElement(TCXLap,'Calories').text = str(self.calories)
            ET.SubElement(TCXLap,'Intensity').text = "Resting"
            ET.SubElement(TCXLap,'TriggerMethod').text = "Manual"
            TCXTrack = ET.SubElement(TCXLap,'Track')
            for i in self.GetTimeSeries():
                TCXTrackpoint = ET.SubElement(TCXTrack,'Trackpoint')
                ET.SubElement(TCXTrackpoint,'Time').text = (self.start_datetime + timedelta(milliseconds=int(i))).isoformat() + "Z"
                time_slice = self.data['timeSeries'][i]
                if "WAYPOINT" in time_slice:
                    TCXPosition = ET.SubElement(TCXTrackpoint,'Position')
                    ET.SubElement(TCXPosition,'LatitudeDegrees').text = str(time_slice["WAYPOINT"]['latitude'])
                    ET.SubElement(TCXPosition,'LongitudeDegrees').text = str(time_slice["WAYPOINT"]['longitude'])
                    ET.SubElement(TCXTrackpoint,'AltitudeMeters').text = str(time_slice["WAYPOINT"]['elevation'])
                if "SMOOTHED_DISTANCE" in time_slice:
                    ET.SubElement(TCXTrackpoint,'DistanceMeters').text = str(float(time_slice["SMOOTHED_DISTANCE"]) * 1000) #Kilometres to Metres
                if "HEARTRATE" in time_slice:
                    TCXHeartRate = ET.SubElement(TCXTrackpoint,'HeartRateBpm')
                    TCXHeartRate.set("xsi:type","HeartRateInBeatsPerMinute_t")
                    ET.SubElement(TCXHeartRate,'Value').text = str(time_slice["HEARTRATE"])
                TCXExtensions = ET.SubElement(TCXTrackpoint,'Extensions')
                if "SPEED" in time_slice or "CADENCE" in time_slice:
                    TCXTPX = ET.SubElement(TCXExtensions,'TPX')
                    TCXTPX.set('xmlns','http://www.garmin.com/xmlschemas/ActivityExtension/v2')
                    TCXTPX.set('CadenceSensor','Footpod')
                    if "SPEED" in time_slice:
                        ET.SubElement(TCXTPX,'Speed').text = str(float(time_slice["SPEED"])/3.6) #Convert from Nike+ KPH to M/S
                    if "CADENCE" in time_slice:
                        #Garmin Cadence is times one foot hits the ground, nike+ cadence is in steps, so we need to divide by 2.
                        ET.SubElement(TCXTPX,'RunCadence').text = str(int(float(time_slice["CADENCE"])/2))
                else:
                    #Keep the empty Extensions element from collapsing to a self-closing tag
                    TCXExtensions.text = "\n"
            tcx_out = '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n' + ET.tostring(TCX)
        except:
            self.logger.debug("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="TCX generation for activity id {a} failed".format(a=self.activity_id)))
            raise
        self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Exported TCX for activity id {a} [{c} characters]".format(a=self.activity_id,c=len(tcx_out))))
        return tcx_out
    #TODO: Generate Mock GPS data for runs without any location info.
    def AddMockGPSData(self):
        """Placeholder: intended to synthesize GPS data for indoor runs (not implemented)."""
        pass
#Map to the garmin defined sports in GPX/TCX files. If this is enhanced in the future should make it a lookup rather than hardcoding
def _GetGarminSport(self):
if self.activity_type == "RUN":
self.logger.info("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Detected 'garmin' sport for activity id {a} [{t}=Running]".format(a=self.activity_id,t=self.activity_type)))
return "Running"
#elif False: #May run other mappings later (Cycling?)
#return self.activity_type
else:
self.logger.info ("[{i}] {f}({m})".format(i=self.id, f=str(sys._getframe().f_code.co_name), m="Failed to detect 'garmin' sport for activity id {a} ({t})".format(a=self.activity_id,t=self.activity_type)))
return self.activity_type
class NikePlusError(Exception):
    """Raised for Nike+ API and data-processing failures."""
    pass
def timestring_to_milliseconds(s):
    """Convert a 'H:MM:SS.mmm' duration string into integer milliseconds."""
    hours, minutes, seconds, milliseconds = re.split(r'[:.]', s)
    total_seconds = (int(hours) * 60 + int(minutes)) * 60 + int(seconds)
    return total_seconds * 1000 + int(milliseconds)
#From http://wiki.scipy.org/Cookbook/SignalSmooth
#Could probably do a bit better here. But for now im kinda lazy, so here it is in a lone function for now.
def smooth_array(a,window_len=11,window='flat'):
    """Smooth a 1-D sequence with a sliding window.

    Args:
        a: sequence of numbers to smooth.
        window_len: window size; clamped to len(a)-1 when the input is shorter.
        window: one of 'flat' (moving average), 'hanning', 'hamming',
            'bartlett' or 'blackman'.

    Returns:
        A list of smoothed values the same length as the input, or the raw
        numpy array when the (clamped) window is too small to smooth.

    Raises:
        ValueError: if the input is not 1-dimensional or the window name
            is not recognised.
    """
    x = numpy.array(a)
    if x.ndim != 1:
        #Fixed: the old `raise ValueError, "..."` form is Python-2-only syntax;
        #the parenthesized form works in both Python 2 and 3.
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        window_len = x.size-1
        #raise ValueError, "Input vector needs to be bigger than window size."
    if window_len<3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    #Mirror the signal at both ends so the convolution has context at the edges
    s=numpy.r_[2*x[0]-x[window_len-1::-1],x,2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat': #moving average
        w=numpy.ones(window_len,'d')
    else:
        #getattr replaces the previous eval(); window was validated above.
        w=getattr(numpy, window)(window_len)
    y=numpy.convolve(w/w.sum(),s,mode='same')
    return y[window_len:-window_len+1].tolist()
#Fix for nike+ sometimes returning non JSON data when their XML/JSON conversion fails
def get_json_auth_data(resp):
    """Extract OAuth token fields from a Nike+ auth response.

    Tries resp.json() first; when the body is not valid JSON (Nike+'s
    XML-to-JSON conversion occasionally fails, or the user did not log in),
    salvages the known token lines from the raw body and marks the result
    with "failed_json_parse": "true".

    Args:
        resp: a requests-style response (needs .json() and .iter_lines()).

    Returns:
        dict of whatever auth fields could be recovered.
    """
    valid_strings = ['access_token','refresh_token','expires_in']
    try:
        #BUG FIX: previously read the undefined name `r` instead of the
        #`resp` parameter, so the fast path always raised NameError.
        a = resp.json()
    #If we got bad json back.... Either the user didnt log in or nike screwed up the XML to JSON conversions
    except:
        t = "{\n"
        for line in resp.iter_lines():
            for string in valid_strings:
                if string in line:
                    t = t + line.strip() + "\n"
        t = t + '"failed_json_parse":"true"\n}'
        a = json.loads(t)
    return a
| rhysmccaig/blue-ribbon-plus | nikeplus/nikeplus.py | Python | mit | 31,565 |
# Packaging script for serpy (setuptools).
from codecs import open
from os import path
from setuptools import setup, find_packages
# Resolve paths relative to this file so builds work from any working directory.
here = path.abspath(path.dirname(__file__))
# The README doubles as the PyPI long description.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='serpy',
    version='0.3.1',
    description='ridiculously fast object serialization',
    long_description=long_description,
    url='https://github.com/clarkduvall/serpy',
    author='Clark DuVall',
    author_email='clark.duvall@gmail.com',
    license='MIT',
    # six provides the Python 2/3 compatibility shims.
    install_requires=['six'],
    test_suite='tests',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    keywords=('serialization', 'rest', 'json', 'api', 'marshal',
              'marshalling', 'validation', 'schema', 'fast'),
    # Ship only the library itself, not docs/tests/benchmarks.
    packages=find_packages(exclude=[
        'contrib',
        'docs',
        'tests*',
        'benchmarks'
    ]),
)
| clarkduvall/serpy | setup.py | Python | mit | 1,452 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MetricsAdvisorConfiguration
from .operations import MetricsAdvisorOperationsMixin
from .. import models
class MetricsAdvisor(MetricsAdvisorOperationsMixin):
    """Microsoft Azure Metrics Advisor REST API (OpenAPI v2).

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://:code:`<resource-name>`.cognitiveservices.azure.com).
    :type endpoint: str
    """

    # NOTE: AutoRest-generated client; manual edits are lost on regeneration.
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        endpoint: str,
        **kwargs: Any
    ) -> None:
        """Build the pipeline client, serializer and deserializer for the service."""
        base_url = '{endpoint}/metricsadvisor/v1.0'
        self._config = MetricsAdvisorConfiguration(credential, endpoint, **kwargs)
        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model type with msrest (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the configured endpoint into the request URL template.
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "MetricsAdvisor":
        """Enter the async context, opening the underlying transport."""
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        """Exit the async context, closing the underlying transport."""
        await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/aio/_metrics_advisor.py | Python | mit | 3,345 |
#!/usr/bin/python
import sys
import argparse
def loopfile(polyfile):
    """Parse an OSM .poly file into a list of WKT-style ring strings.

    Each ring in the .poly file (a block opened by a one-character section
    name line and closed by END) becomes a string of the form
    "(lon lat,lon lat,...)". Non-coordinate lines are skipped via the
    ValueError handler.

    Args:
        polyfile: path to the .poly file to read.

    Returns:
        list of ring strings ready for createwkt().
    """
    polyline=''
    with open(polyfile,'r') as f:
        read_data = f.readlines()
        f.close()
    polylines = []
    polyline = ""
    countend = 0
    # countpoly = 0
    for l in read_data:
        s = l.strip()
        if len(s) > 1:
            try:
                x = ''
                y = ''
                #First token must look like a longitude (int(s[0]) raises on
                #non-numeric lines, which the except below skips)
                if int(s[0]) > -180:
                    xy = s.split(" ")
                    x = xy[0]
                    #Pick the last remaining token that looks like a latitude
                    for v in xy:
                        if x != v:
                            if len(v) > 1:
                                if int(v[0])> -90:
                                    y = v
                    polyline += x + " " + y + ","
            except ValueError:
                pass
        #A one-character line is a section header: start a new ring
        if len(s) == 1:
            polyline = "("
#            countpoly += 1
#            if countpoly == 1:
#                polyline = "POLYGON (("
#
#            else:
#                polyline = "POLYGON (("
        #END closes a ring; every second END closes the whole polygon section.
        #NOTE(review): only odd-numbered ENDs emit a ring — presumably to skip
        #the file-terminating END; confirm against multi-ring .poly files.
        if s == "END":
            countend += 1
            if (countend%2) == 1:
                #Drop the trailing comma before closing the ring
                polyline = polyline[0:len(polyline)-1]
                polyline += ")" #)"
                polylines.append(polyline)
    return polylines
def createwkt(polylines):
    """Join ring strings like '(x y,...)' into one WKT POLYGON ('' if empty)."""
    if not polylines:
        return ""
    return "POLYGON (" + ",".join(polylines) + ")"
# Command-line entry point: parse arguments, convert the .poly file, and
# emit WKT (optionally wrapped in INSERT/CREATE TABLE SQL).
# NOTE: this script is Python 2 (it uses the `print` statement below).
parser = argparse.ArgumentParser(description='convert a .poly file in .wkt format')
parser.add_argument('infile', metavar='infile', type=str,
                   help='inputfile')
parser.add_argument('-o',dest='outfile', type=str,help='output file',default=None)
parser.add_argument('-i', '--insertsql', dest='sqlstring',default=False,action='store_true',
                   help='create insert sql string')
parser.add_argument('-c', '--createtable', dest='createtable',default=False,action='store_true',
                   help='create sql string with create table')
# NOTE(review): action='store_true' here means passing -t sets tablename to
# True rather than accepting a name — presumably this should take a value.
parser.add_argument('-t', '--tablename', dest='tablename',default='poly',action='store_true',
                   help='to assign a name of the table (default=poly')
parser.add_argument('-s','--silent', dest='silent',help="dont'show output, if you don't need a output file this is si default",
                   action='store_true',default=False)
args = parser.parse_args()
tablename = args.tablename
wkt = createwkt(loopfile(args.infile))
out = None
if (args.sqlstring):
    out = "INSERT INTO %s (geom) values (GeometryFromText('%s'),4326))\n;" % (tablename,wkt)
# NOTE(review): -c silently overwrites the -i string when both are given.
if (args.createtable):
    out = "CREATE TABLE %s (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT);\n" % (tablename)
    out += "SELECT AddGeometryColumn('%s', 'geom', 4326, 'POLYGON', 2);\n" % (tablename)
    out += "INSERT INTO %s (geom) values (GeometryFromText('%s',4326));\n" % (tablename,wkt)
# NOTE(review): out is initialised to None, so this branch is unreachable;
# the `is None` fallback below is what actually applies.
if out == "":
    out = wkt
if (out is None):
    out = wkt
if (args.outfile is not None):
    with open(args.outfile,'w') as f:
        f.write(out)
        f.close()
if (args.silent == False):
    print out
| napo/poly2wkt | poly2wkt.py | Python | mit | 2,953 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Packaging script for FUSED-Wake. numpy.distutils (rather than plain
# setuptools) is used because the package builds Fortran extensions via f2py.
#
# try:
#     from setuptools import setup
# except ImportError:
#     from distutils.core import setup
#from setuptools import setup
#from setuptools import Extension
from numpy.distutils.core import setup
from numpy.distutils.extension import Extension
import os
import glob
# README and HISTORY are concatenated into the PyPI long description.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
requirements = [
    'sphinx',
    'sphinx-fortran',
    'numpy',
    'scipy',
    'pandas',
    'matplotlib',
    'PyYAML',
    'utm'
]
test_requirements = [
    'tox',
    'pytest',
    'coverall',
]
setup(
    name='fusedwake',
    version='0.1.0',
    description="A collection of wind farm flow models for FUSED-Wind",
    long_description=readme + '\n\n' + history,
    author="Pierre-Elouan Rethore",
    author_email='pire@dtu.dk',
    url='https://github.com/DTUWindEnergy/FUSED-Wake',
    packages=[
        'fusedwake',
        'fusedwake.gcl',
        'fusedwake.gcl.python',
        'fusedwake.noj',
        # 'fusedwake.noj.python',
        'fusedwake.gau',
        # 'fusedwake.gau.python',
        #'fusedwake.ainslie',
        # 'fusedwake.ainslie.python',
        #'fusedwake.sdwm',
    ],
    package_dir={'fusedwake':
                 'fusedwake'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU Affero v3",
    zip_safe=False,
    keywords='fusedwake',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Affero v3',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    # Fortran sources compiled into extension modules under the fusedwake package.
    ext_package='fusedwake',
    ext_modules=[Extension('gcl.fortran',
                           glob.glob(os.path.join('fusedwake', 'gcl', 'fortran',
                                                  'GCL.f'))),
                 Extension('noj.fortran',
                           glob.glob(os.path.join('fusedwake', 'noj', 'fortran',
                                                  'NOJ.f'))),
                 Extension('noj.fortran_mod',
                           glob.glob(os.path.join('fusedwake', 'noj', 'fortran',
                                                  'Mod_NOJ.f'))),
                 Extension('gau.fortran',
                           glob.glob(os.path.join('fusedwake', 'gau', 'fortran',
                                                  'GAU.f')))],
)
| DTUWindEnergy/FUSED-Wake | setup.py | Python | mit | 2,875 |
#!/usr/bin/env python
"""
PURPOSE: The routines in this file test the get_neighborhoods module.
Created on 2015-04-02T21:24:17
"""
from __future__ import division, print_function
#import numpy as np
#from types import *
#from nose.tools import raises
#import pandas as pd
import nhrc2.backend.read_seeclickfix_api_to_csv as rscf
from nhrc2.backend import get_neighborhoods as get_ngbrhd
__author__ = "Matt Giguere (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "Matt Giguere"
__email__ = "matthew.giguere@yale.edu"
__status__ = " Development NOT(Prototype or Production)"
#Every SeeClickFix issue should resolve to exactly one neighborhood.
def test_get_neighborhoods():
    """Check that get_neighborhoods() yields one entry per issue."""
    categories = rscf.read_categories(readfile=True)
    issue_table = rscf.read_issues(categories, readfile=True)
    neighborhood_list = get_ngbrhd.get_neighborhoods()
    assert len(issue_table) == len(neighborhood_list)
#@raises(ValueError)
#def test_make_function_raise_value_error():
| newhavenrc/nhrc2 | tests/test_get_neighborhoods.py | Python | mit | 1,060 |
from typing import Optional, Any, List, Union
from enum import Enum
from pathlib import Path
from wasabi import Printer
import srsly
import re
import sys
import itertools
from ._util import app, Arg, Opt
from ..training import docs_to_json
from ..tokens import DocBin
from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs
from ..training.converters import conllu_to_docs
# Converters are matched by file extension except for ner/iob, which are
# matched by file extension and content. To add a converter, add a new
# entry to this dict with the file extension mapped to the converter function
# imported from /converters.
CONVERTERS = {
"conllubio": conllu_to_docs,
"conllu": conllu_to_docs,
"conll": conllu_to_docs,
"ner": conll_ner_to_docs,
"iob": iob_to_docs,
"json": json_to_docs,
}
# File types that can be written to stdout
FILE_TYPES_STDOUT = ("json",)
class FileTypes(str, Enum):
    """Output file types accepted by the convert CLI (str-valued for typer)."""
    json = "json"
    spacy = "spacy"
@app.command("convert")
def convert_cli(
    # fmt: off
    input_path: str = Arg(..., help="Input file or directory", exists=True),
    output_dir: Path = Arg("-", help="Output directory. '-' for stdout.", allow_dash=True, exists=True),
    file_type: FileTypes = Opt("spacy", "--file-type", "-t", help="Type of data to produce"),
    n_sents: int = Opt(1, "--n-sents", "-n", help="Number of sentences per doc (0 to disable)"),
    seg_sents: bool = Opt(False, "--seg-sents", "-s", help="Segment sentences (for -c ner)"),
    model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"),
    morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"),
    merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"),
    converter: str = Opt("auto", "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"),
    ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True),
    lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"),
    concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"),
    # fmt: on
):
    """
    Convert files into json or DocBin format for training. The resulting .spacy
    file can be used with the train command and other experiment management
    functions.

    If no output_dir is specified and the output format is JSON, the data
    is written to stdout, so you can pipe them forward to a JSON file:
    $ spacy convert some_file.conllu --file-type json > some_file.json

    DOCS: https://spacy.io/api/cli#convert
    """
    if isinstance(file_type, FileTypes):
        # We get an instance of the FileTypes from the CLI so we need its string value
        file_type = file_type.value
    input_path = Path(input_path)
    # Normalize the "-" stdout sentinel back to a plain string for convert()
    output_dir = "-" if output_dir == Path("-") else output_dir
    # Writing to stdout implies no progress/status messages
    silent = output_dir == "-"
    msg = Printer(no_print=silent)
    verify_cli_args(msg, input_path, output_dir, file_type, converter, ner_map)
    converter = _get_converter(msg, converter, input_path)
    convert(
        input_path,
        output_dir,
        file_type=file_type,
        n_sents=n_sents,
        seg_sents=seg_sents,
        model=model,
        morphology=morphology,
        merge_subtokens=merge_subtokens,
        converter=converter,
        ner_map=ner_map,
        lang=lang,
        concatenate=concatenate,
        silent=silent,
        msg=msg,
    )
def convert(
    input_path: Union[str, Path],
    output_dir: Union[str, Path],
    *,
    file_type: str = "json",
    n_sents: int = 1,
    seg_sents: bool = False,
    model: Optional[str] = None,
    morphology: bool = False,
    merge_subtokens: bool = False,
    converter: str = "auto",
    ner_map: Optional[Path] = None,
    lang: Optional[str] = None,
    concatenate: bool = False,
    silent: bool = True,
    msg: Optional[Printer],
) -> None:
    """Convert training files to JSON or DocBin (.spacy) format.

    Walks input_path, runs each file through the selected converter function,
    and writes the result per input file (or as one concatenated output) to
    output_dir, or to stdout when output_dir is "-".
    """
    if not msg:
        msg = Printer(no_print=silent)
    ner_map = srsly.read_json(ner_map) if ner_map is not None else None
    doc_files = []
    for input_loc in walk_directory(Path(input_path), converter):
        input_data = input_loc.open("r", encoding="utf-8").read()
        # Use converter function to convert data
        func = CONVERTERS[converter]
        docs = func(
            input_data,
            n_sents=n_sents,
            seg_sents=seg_sents,
            append_morphology=morphology,
            merge_subtokens=merge_subtokens,
            lang=lang,
            model=model,
            no_print=silent,
            ner_map=ner_map,
        )
        doc_files.append((input_loc, docs))
    if concatenate:
        # Collapse all per-file doc iterables into a single output keyed on input_path
        all_docs = itertools.chain.from_iterable([docs for _, docs in doc_files])
        doc_files = [(input_path, all_docs)]
    for input_loc, docs in doc_files:
        if file_type == "json":
            data = [docs_to_json(docs)]
            len_docs = len(data)
        else:
            db = DocBin(docs=docs, store_user_data=True)
            len_docs = len(db)
            data = db.to_bytes()
        if output_dir == "-":
            _print_docs_to_stdout(data, file_type)
        else:
            # Mirror the input directory structure below output_dir
            if input_loc != input_path:
                subpath = input_loc.relative_to(input_path)
                output_file = Path(output_dir) / subpath.with_suffix(f".{file_type}")
            else:
                output_file = Path(output_dir) / input_loc.parts[-1]
                output_file = output_file.with_suffix(f".{file_type}")
            _write_docs_to_file(data, output_file, file_type)
            msg.good(f"Generated output file ({len_docs} documents): {output_file}")
def _print_docs_to_stdout(data: Any, output_type: str) -> None:
    """Write converted data to stdout: JSON via srsly, otherwise raw bytes."""
    if output_type != "json":
        sys.stdout.buffer.write(data)
    else:
        srsly.write_json("-", data)
def _write_docs_to_file(data: Any, output_file: Path, output_type: str) -> None:
    """Write converted data to output_file, creating parent directories as needed."""
    parent = output_file.parent
    if not parent.exists():
        parent.mkdir(parents=True)
    if output_type != "json":
        with output_file.open("wb") as sink:
            sink.write(data)
    else:
        srsly.write_json(output_file, data)
def autodetect_ner_format(input_data: str) -> Optional[str]:
    """Guess whether NER data is token-per-line ("ner") or pipe-separated ("iob").

    Only the first 20 lines are inspected; returns None when the evidence is
    absent or ambiguous (both formats matched).
    """
    pipe_pattern = re.compile(r"\S+\|(O|[IB]-\S+)")
    token_pattern = re.compile(r"\S+\s+(O|[IB]-\S+)$")
    iob_hits = 0
    ner_hits = 0
    for raw_line in input_data.split("\n")[:20]:
        stripped = raw_line.strip()
        if pipe_pattern.search(stripped):
            iob_hits += 1
        if token_pattern.search(stripped):
            ner_hits += 1
    if iob_hits == 0 and ner_hits > 0:
        return "ner"
    if ner_hits == 0 and iob_hits > 0:
        return "iob"
    return None
def walk_directory(path: Path, converter: str) -> List[Path]:
    """Recursively collect input files under path, sorted for stable ordering.

    Dot-prefixed files and directories are skipped. For the json/conll/iob
    converters, only files whose names end with that converter's suffix are
    kept; other converters accept every file. A non-directory path is
    returned as-is.
    """
    if not path.is_dir():
        return [path]
    #Converters with a fixed filename suffix; anything else accepts all files.
    suffix_filter = {"json": "json", "conll": "conll", "iob": "iob"}.get(converter)
    pending = [path]
    visited = set()
    locs = []
    while pending:
        current = pending.pop(0)
        key = str(current)
        if key in visited:
            continue
        visited.add(key)
        if current.parts[-1].startswith("."):
            continue
        if current.is_dir():
            pending.extend(current.iterdir())
        elif suffix_filter is not None and not current.parts[-1].endswith(suffix_filter):
            continue
        else:
            locs.append(current)
    # It's good to sort these, in case the ordering messes up cache.
    locs.sort()
    return locs
def verify_cli_args(
    msg: Printer,
    input_path: Union[str, Path],
    output_dir: Union[str, Path],
    file_type: FileTypes,
    converter: str,
    ner_map: Optional[Path],
):
    """Validate convert CLI arguments, exiting with an error message on failure.

    Checks that the input/output paths and NER map exist, that stdout output
    is only requested for supported file types, that directory inputs contain
    files of a single type, and that the converter name is known.
    """
    input_path = Path(input_path)
    # Only some file types can be streamed to stdout (see FILE_TYPES_STDOUT)
    if file_type not in FILE_TYPES_STDOUT and output_dir == "-":
        msg.fail(
            f"Can't write .{file_type} data to stdout. Please specify an output directory.",
            exits=1,
        )
    if not input_path.exists():
        msg.fail("Input file not found", input_path, exits=1)
    if output_dir != "-" and not Path(output_dir).exists():
        msg.fail("Output directory not found", output_dir, exits=1)
    if ner_map is not None and not Path(ner_map).exists():
        msg.fail("NER map not found", ner_map, exits=1)
    if input_path.is_dir():
        input_locs = walk_directory(input_path, converter)
        if len(input_locs) == 0:
            msg.fail("No input files in directory", input_path, exits=1)
        # With converter "auto" the file extension picks the converter, so
        # mixed extensions in one directory are ambiguous
        file_types = list(set([loc.suffix[1:] for loc in input_locs]))
        if converter == "auto" and len(file_types) >= 2:
            file_types = ",".join(file_types)
            msg.fail("All input files must be same type", file_types, exits=1)
    if converter != "auto" and converter not in CONVERTERS:
        msg.fail(f"Can't find converter for {converter}", exits=1)
def _get_converter(msg, converter, input_path):
    """Resolve the converter name, auto-detecting from file extension/content.

    With converter "auto", the first file's extension selects the converter;
    for ner/iob files the content is additionally sniffed, since both share
    extension conventions but differ in layout.
    """
    if input_path.is_dir():
        # Sample the first file in the directory to detect the format
        input_path = walk_directory(input_path, converter)[0]
    if converter == "auto":
        converter = input_path.suffix[1:]
    if converter == "ner" or converter == "iob":
        with input_path.open(encoding="utf8") as file_:
            input_data = file_.read()
        converter_autodetect = autodetect_ner_format(input_data)
        if converter_autodetect == "ner":
            msg.info("Auto-detected token-per-line NER format")
            converter = converter_autodetect
        elif converter_autodetect == "iob":
            msg.info("Auto-detected sentence-per-line NER format")
            converter = converter_autodetect
        else:
            # Fall through with the user-supplied converter and warn
            msg.warn(
                "Can't automatically detect NER format. "
                "Conversion may not succeed. "
                "See https://spacy.io/api/cli#convert"
            )
    return converter
| spacy-io/spaCy | spacy/cli/convert.py | Python | mit | 10,094 |
from praw import Reddit
import json
import os
import time
import datetime
from .tools import storage
from .tools import display
class FetchBot:
"""Bot to fetch the subreddit data."""
    def __init__(self, user_agent, subreddit, data_file):
        """Load (or initialize) the data file and connect to Reddit.

        Args:
            user_agent: user agent string passed to praw.Reddit.
            subreddit: name of the subreddit to track.
            data_file: path of the JSON file used to persist fetched data.

        Raises:
            ValueError: if the existing data file belongs to another subreddit.
        """
        self._user_agent = user_agent
        self._subreddit = subreddit
        self._data_file = data_file
        try:
            with open(self._data_file) as df:
                self._data = json.load(df)
        except (FileNotFoundError,json.decoder.JSONDecodeError):
            # Missing or corrupt data file: start from an empty structure
            self._data = json.loads('{"comments":{},"posts":{}}')
        try:
            # Refuse to mix data from different subreddits in one file
            if self._data['subreddit'] != self._subreddit:
                raise ValueError('The data file does not correspond the subreddit r/'+self._subreddit+' (found "'+self._data['subreddit']+'")')
        except KeyError:
            # Fresh data file: stamp it with our subreddit
            self._data['subreddit'] = self._subreddit
        self._praw = Reddit(self._user_agent)
def __del__(self):
"""Destructor"""
if not os.path.exists(os.path.dirname(self._data_file)):
os.makedirs(os.path.dirname(self._data_file))
with open(self._data_file, 'w') as df:
json.dump(self._data, df)
def fetch(self):
"""Fetching function"""
self._fetch(self._praw.get_comments(self._subreddit, limit=500), 'comments')
self._fetch(self._praw.get_subreddit(self._subreddit).get_new(limit=500), 'posts')
def _fetch(self, submissions, key):
"""Generic fetching function"""
is_first = True
storage.dict_check_key(self._data[key], 'first', float(round(time.time())))
try:
new_creation_limit = self._data[key]['last']
except:
self._data[key]['last'] = new_creation_limit = 0
for it in submissions:
if is_first:
is_first = False
new_creation_limit = it.created
if it.created <= self._data[key]['last']:
break
storage.dictvar(self._data[key], 'count', 1, 1)
try:
if str(it.author) not in self._data['unique-users']:
self._data['unique-users'][str(it.author)] = {'flair': it.author_flair_text, key: 1}
else:
try:
self._data['unique-users'][str(it.author)][key] += 1
except KeyError:
self._data['unique-users'][str(it.author)][key] = 1
except KeyError:
self._data['unique-users'] = dict()
self._data['unique-users'][str(it.author)] = {'flair': it.author_flair_text, key: 1}
if it.author_flair_text:
storage.dict_check_key(self._data[key], 'flair-presence', dict())
storage.dictvar(self._data[key]['flair-presence'], str(it.author_flair_text), 1, 1)
if key == 'posts':
storage.dict_check_key(self._data[key], 'subject-presence', dict())
storage.dictvar(self._data[key]['subject-presence'], str(it.link_flair_text), 1, 1)
storage.dict_check_key(self._data['unique-users'][str(it.author)], 'subject-presence', dict())
storage.dictvar(self._data['unique-users'][str(it.author)]['subject-presence'], str(it.link_flair_text), 1, 1)
storage.dict_check_key(self._data[key], 'domain-presence', dict())
storage.dictvar(self._data[key]['domain-presence'], str(it.domain), 1, 1)
storage.dict_check_key(self._data[key], 'time', dict())
storage.dict_check_key(self._data[key]['time'], 'all', dict())
storage.dict_check_key(self._data[key]['time'], '0', dict())
storage.dict_check_key(self._data[key]['time'], '1', dict())
storage.dict_check_key(self._data[key]['time'], '2', dict())
storage.dict_check_key(self._data[key]['time'], '3', dict())
storage.dict_check_key(self._data[key]['time'], '4', dict())
storage.dict_check_key(self._data[key]['time'], '5', dict())
storage.dict_check_key(self._data[key]['time'], '6', dict())
time_datetime = datetime.datetime.fromtimestamp(float(it.created_utc))
time_str = str(time_datetime.hour).zfill(2)+str(time_datetime.minute).zfill(2)
storage.dictvar(self._data[key]['time']['all'], time_str, 1, 1)
storage.dictvar(self._data[key]['time'][str(time_datetime.weekday())], time_str, 1, 1)
self._data[key]['last'] = new_creation_limit
def FetchBotGenerator(config_file):
"""Generate a list-like container of FetchBot objects"""
with open(config_file) as cf:
json_config = json.load(cf)
user_agent = json_config['user-agent']
for i in json_config['bots']:
yield FetchBot(user_agent, i['subreddit'], i['data-file'])
def autorun():
"""Autorun function of this module"""
home = os.getenv('HOME')
config_file = os.path.join(home, '.config/flairstats/config.json')
if not os.path.exists(config_file):
raise FileNotFoundError(config_file)
fetchbots = FetchBotGenerator(config_file)
for bot in fetchbots:
bot.fetch()
if __name__ == "__main__":
autorun()
| dopsi/flairstats | flairstats/fetchbot.py | Python | mit | 5,308 |
from __future__ import print_function
from __future__ import division
__author__ = """Alex "O." Holcombe""" ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import numpy as np
import itertools #to calculate all subsets
from copy import deepcopy
from math import atan, pi, cos, sin, sqrt, ceil
import time, sys, platform, os, gc
from psychopy import visual, core
import random
#If you run this code stand-alone, it will do a demo of the basic stimulus it is designed to provide
#BEGIN helper functions from primes.py
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a,b):
"""Return lowest common multiple."""
return (a*b)/gcd(a,b)
def LCM(terms):
"Return lcm of a list of numbers."
return reduce(lambda a,b: lcm(a,b), terms)
#END helper functions from primes.py
def calcCondsPerNumTargets(numRings,numTargets):
#numRings is number of rings, each of which can have up to one target
#numTargets is list or array of numTarget conditions, e.g. 1,2,3 means the experiment includes 1, 2, and 3 targets
#Each target can be placed randomly in any of the rings.
#Want all possibilities to be covered equally often. That means each target number condition has to include all the combinations
# of places that number of targets can go.
#So that some targetNum conditinos don't have more trials than others, have to scale up each targetNum condition to the worst case.
#Actually it's worse than that. To make them fit evenly, have to use least common multiple
#3 rings choose 2 for targets, 3 rings choose 1 for target, have to have as many conditions as the maximum.
#To find maximum, determine length of each.
ringNums = np.arange(numRings)
numPossibilitiesEach = list()
for k in numTargets:
numPossibilitiesCouldPutKtargets = len( list(itertools.combinations(ringNums,k)) )
#print(numPossibilitiesCouldPutKtargets)
numPossibilitiesEach.append( numPossibilitiesCouldPutKtargets )
m = max( numPossibilitiesEach ) #because the worst case (number of targets) requires this many, have to have this many for all. Actually,
leastCommonMultiple = LCM( numPossibilitiesEach ) #to have equal number of trials per numtargets, would have to use this figure for each
#print('biggest=',m, ' Least common multiple=', leastCommonMultiple)
return leastCommonMultiple
def accelerateComputer(slowFast, process_priority, disable_gc):
# process_priority = 'normal' 'high' or 'realtime'
if slowFast:
if process_priority == 'normal':
pass
elif process_priority == 'high':
core.rush(True)
elif process_priority == 'realtime': # Only makes a diff compared to 'high' on Windows.
core.rush(True, realtime = True)
else:
print('Invalid process priority:',process_priority,"Process running at normal.")
process_priority = 'normal'
if disable_gc:
gc.disable()
if slowFast==0: #turn off the speed-up
if disable_gc:
gc.enable()
core.rush(False)
def openMyStimWindow(monitorSpec,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=monitorSpec,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
if myWin is None:
print('ERROR: Failed to open window in openMyStimWindow!')
core.quit()
return myWin
def constructRingsAsGratings(myWin,numRings,radii,ringRadialMaskEachRing,numObjects,patchAngle,colors,stimColorIdxsOrder,gratingTexPix,blobToCueEachRing,ppLog):
#Originally to construct a grating formed of the colors in order of stimColorIdxsOrder
antialiasGrating = True
autoLogging = False
texEachRing=list() #texture which will draw the ring of objects via openGL texture on grating
cueTexEachRing=list() #making a separate grating for the cue, wherein everything background color except the location of the cue
ringsRadial=list(); #after making the rings of object, put them in this list
cueRings=list() #after making grating for each cue, put it in this cue
stimColorIdxsOrder= stimColorIdxsOrder[::-1] #reverse order of indices, because grating texture is rendered in reverse order than is blobs version
radialMaskEachRing=[[0,0,0,1,1,] ,[0,0,0,0,0,0,1,1,],[0,0,0,0,0,0,0,0,0,0,1,1,]]
numUniquePatches= len( max(stimColorIdxsOrder,key=len) )
numCycles =(1.0*numObjects) / numUniquePatches
angleSegment = 360./(numUniquePatches*numCycles)
if gratingTexPix % numUniquePatches >0: #gratingTexPix contains numUniquePatches. numCycles will control how many total objects there are around circle
ppLog.warn('Warning: could not exactly render a '+str(numUniquePatches)+'-segment pattern radially, will be off by '+str( (gratingTexPix%numUniquePatches)*1.0 /gratingTexPix ) )
if numObjects % numUniquePatches >0:
msg= 'Warning: numUniquePatches ('+str(numUniquePatches)+') not go evenly into numObjects'; ppLog.warn(msg)
#create texture for red-green-blue-red-green-blue etc. radial grating
for i in range(numRings):
#myTex.append(np.zeros([gratingTexPix,gratingTexPix,3])+[1,-1,1])
texEachRing.append( np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] ) #start with all channels in all locs = bgColor
cueTexEachRing.append( np.ones([gratingTexPix,gratingTexPix,3])*bgColor[0] )
if patchAngle > angleSegment:
msg='Error: patchAngle requested ('+str(patchAngle)+') bigger than maximum possible ('+str(angleSegment)+') numUniquePatches='+str(numUniquePatches)+' numCycles='+str(numCycles);
print(msg); ppLog.error(msg)
oneCycleAngle = 360./numCycles
segmentSizeTexture = angleSegment/oneCycleAngle *gratingTexPix #I call it segment because includes spaces in between, that I'll write over subsequently
patchSizeTexture = patchAngle/oneCycleAngle *gratingTexPix
patchSizeTexture = round(patchSizeTexture) #best is odd number, even space on either size
patchFlankSize = (segmentSizeTexture-patchSizeTexture)/2.
patchAngleActual = patchSizeTexture / gratingTexPix * oneCycleAngle
if abs(patchAngleActual - patchAngle) > .03: #.01
msg = 'Desired patchAngle = '+str(patchAngle)+' but closest can get with '+str(gratingTexPix)+' gratingTexPix is '+str(patchAngleActual);
ppLog.warn(msg)
for colrI in range(numUniquePatches): #for that portion of texture, set color
start = colrI*segmentSizeTexture
end = start + segmentSizeTexture
start = round(start) #don't round until after do addition, otherwise can fall short
end = round(end)
ringColr=list();
for i in range(numRings):
ringColr.append(colors[ stimColorIdxsOrder[i][colrI] ])
for colorChannel in range(3):
for i in range(numRings):
texEachRing[i][:, start:end, colorChannel] = ringColr[i][colorChannel];
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
cueTexEachRing[i][:, base+start/numCycles:base+end/numCycles, colorChannel] = ringColr[1][colorChannel]
#draw bgColor area (emptySizeEitherSideOfPatch) by overwriting first and last entries of segment
for i in range(numRings):
texEachRing[i][:, start:start+patchFlankSize, :] = bgColor[0]; #one flank
texEachRing[i][:, end-1-patchFlankSize:end, :] = bgColor[0]; #other flank
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
cueTexEachRing[i][:,base+start/numCycles:base+(start+patchFlankSize)/numCycles,:] =bgColor[0];
cueTexEachRing[i][:,base+(end-1-patchFlankSize)/numCycles:base+end/numCycles,:] =bgColor[0]
#color the segment to be cued white. First, figure out cue segment len
segmentLen = gratingTexPix/numCycles*1/numUniquePatches
WhiteCueSizeAdj=0 # adust the white cue marker wingAdd 20110923
if numObjects==3:WhiteCueSizeAdj=110
elif numObjects==6:WhiteCueSizeAdj=25
elif numObjects==12:WhiteCueSizeAdj=-15
elif numObjects==2:WhiteCueSizeAdj=200
for i in range(numRings): #color cue position white
if blobToCueEachRing[i] >=0: #-999 means dont cue anything
blobToCueCorrectForRingReversal = numObjects-1 - blobToCueEachRing[i] #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
if blobToCueCorrectForRingReversal==0 and numObjects==12: WhiteCueSizeAdj=0
cueStartEntry = blobToCueCorrectForRingReversal*segmentLen+WhiteCueSizeAdj
cueEndEntry = cueStartEntry + segmentLen-2*WhiteCueSizeAdj
cueTexEachRing[i][:, cueStartEntry:cueEndEntry, :] = -1*bgColor[0] #-1*bgColor is that what makes it white?
blackGrains = round( .25*(cueEndEntry-cueStartEntry) )#number of "pixels" of texture at either end of cue sector to make black. Need to update this to reflect patchAngle
cueTexEachRing[i][:, cueStartEntry:cueStartEntry+blackGrains, :] = bgColor[0]; #this one doesn't seem to do anything?
cueTexEachRing[i][:, cueEndEntry-1-blackGrains:cueEndEntry, :] = bgColor[0];
angRes = 100 #100 is default. I have not seen any effect. This is currently not printed to log file!
for i in range(numRings):
ringsRadial.append(visual.RadialStim(myWin, tex=texEachRing[i], color=[1,1,1],size=radii[i],#myTexInner is the actual colored pattern. radial grating used to make it an annulus
mask=ringRadialMaskEachRing[i], # this is a 1-D mask dictating the behaviour from the centre of the stimulus to the surround.
radialCycles=0, angularCycles=numObjects*1.0/numUniquePatches,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging))
#the mask is radial and indicates that should show only .3-.4 as one moves radially, creating an annulus
#end preparation of colored rings
#draw cueing grating for tracking task. Have entire grating be empty except for one white sector
cueRings.append(visual.RadialStim(myWin, tex=cueTexEachRing[i], color=[1,1,1],size=radii[i], #cueTexInner is white. Only one sector of it shown by mask
mask = radialMaskEachRing[i], radialCycles=0, angularCycles=1, #only one cycle because no pattern actually repeats- trying to highlight only one sector
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging) )#depth doesn't seem to work, just always makes it invisible?
currentlyCuedBlobEachRing = blobToCueEachRing #this will mean that don't have to redraw
return ringsRadial,cueRings,currentlyCuedBlobEachRing
######### End constructRingAsGrating ###########################################################
#########################################
def gratingAngleToEuclidean(theta):
euclidean = -1.0 * theta #because grating angles are clockwise and euclidean is counter-clockwise
euclidean += 90 #because gratings start with 0 at North and Euclidean 0 is East
return euclidean
def constructThickThinWedgeRingsTargetAndCue(myWin,initialAngle,radius,radialMask,radialMaskTarget,cueRadialMask,visibleWedge,numObjects,patchAngleThick,patchAngleThin,bgColor,
thickWedgeColor,thinWedgeColor,targetAngleOffset,targetRadialOffset,gratingTexPix,cueColor,objToCue,ppLog):
#Construct a grating formed of the colors in order of stimColorIdxsOrder
#Also construct a similar cueRing grating with same colors, but one blob potentially highlighted.
#cueRing Has different spacing than ringRadial, not sure why, I think because calculations tend to be off as it's
#always one cycle.
#radialMask doesn't seem to eliminate very-central part, bizarre
antialiasGrating = False #Don't set this to true because in present context, it's like imposing a radial Gaussian ramp on each object
autoLogging = False
numCycles = numObjects
segmentAngle = 360./numCycles
#create texture for red-green-blue-red-green-blue etc. radial grating
#2-D texture which will draw the ring of objects via openGL texture on grating
ringTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
cueTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
decoyTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
oneCycleAngle = 360./numCycles
def patchSizeForTexture(segmentAngle, patchAngle, oneCycleAngle, gratingTexPix):
segmentSizeTexture = segmentAngle/oneCycleAngle *gratingTexPix #I call it segment because includes spaces between objects, that I'll write over subsequently
if patchAngle > segmentAngle:
msg='Error: patchAngle requested ('+str(patchAngle)+') bigger than maximum possible ('+str(segmentAngle)+') numCycles='+str(numCycles)
print(msg); ppLog.error(msg)
patchSizeTexture = patchAngle*1.0/oneCycleAngle *gratingTexPix
patchSizeTexture = round(patchSizeTexture) #best is odd number, even space on either size
patchFlankSize =int( (segmentSizeTexture-patchSizeTexture)/2. )#this area will be drawn in bgColor
patchAngleActual = patchSizeTexture*1.0 / gratingTexPix * oneCycleAngle
if abs(patchAngleActual - patchAngle) > .03:
msg = 'Desired patchAngle = '+str(patchAngle)+' but closest can get with '+str(gratingTexPix)+' gratingTexPix is '+str(patchAngleActual);
ppLog.warn(msg)
return segmentSizeTexture, patchSizeTexture, patchFlankSize
#thick wedges. Create texture for visual.radialStim
segmentSizeTexture, patchSizeTexture, patchFlankSize = patchSizeForTexture(segmentAngle, patchAngleThick, oneCycleAngle, gratingTexPix)
start = 0 #identify starting texture position for this segment
end = int( round( start + segmentSizeTexture ) ) #don't round until after do addition, otherwise can fall short
angRes = 200 #100 is default. I have not seen an artifact at present when set to 100, two things drawn don't overlap exactly
#First draw the entire segment in patchColr, then erase sides (flankers) leaving only the patchAngle
ringTex[:, start:end, :] = thickWedgeColor[:]
#spaces in between objects are termed the flanks, should be bgColor,
ringTex[:, start:start+patchFlankSize, :] = bgColor[:] #one flank
ringTex[:, end-1-patchFlankSize:end, :] = bgColor[:] #other flank
#thin wedges. Create texture for visual.radialStim
segmentSizeTexture, thinWedgeSizeTexture, patchFlankSizeThinWedge = patchSizeForTexture(segmentAngle, patchAngleThin, oneCycleAngle, gratingTexPix)
#Instead of drawing the red and undisplaced blue with the same call to radialStim,
#We will draw the red with one call to radialStim, then the thinner blue sliver on top, using radialMask so it's only the sliver and leaves the
#remainder of the red showing.
#First draw the thick red contexts thickWedges
ringRadialThickWedges= visual.RadialStim(myWin, tex=ringTex, color=[1,1,1],size=radius,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge=visibleWedge,
ori = initialAngle, #essentially the phase of the grating
mask=radialMask, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#thinWedge, the usually-blue target
#First draw the entire segment in thinWedgeColor, then erase sides (flankers) leaving only the patchAngle (wedge angle)
thinRingTex = np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] #start with all channels in all locs = bgColor
thinRingTex[:, start:end, :] = thinWedgeColor[:]
#spaces in between objects are termed the flanks, should be bgColor,
thinRingTex[:, start:start+patchFlankSize, :] = bgColor[:] #one flank
thinRingTex[:, end-1-patchFlankSize:end, :] = bgColor[:] #other flank
ringRadialThinWedges= visual.RadialStim(myWin, tex=thinRingTex, color=[1,1,1],size=radius,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge=visibleWedge,
ori = initialAngle, #essentially the phase of the grating
mask=radialMaskTarget, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#Draw target (task is to judge offset of thin wedge relative to thick wedge, or brightness of target)
#So, overdraw a single segment of the grating by using visibleWedge
#angularPhase =
#I need to not show the part of the thick wedge that will be displaced, while showing enough of thick wedge to overdraw previous location of thin wedge
targetCorrectedForRingReversal = objToCue #numObjects-1 - objToCue #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
kludgeWiden= 5
visibleAngleStart = targetCorrectedForRingReversal*segmentAngle + (segmentAngle-patchAngleThick)/2 - kludgeWiden
visibleAngleEnd = (visibleAngleStart+kludgeWiden) + patchAngleThick + kludgeWiden
#print('targetCorrectedForRingReversal = ',targetCorrectedForRingReversal,'targetRing initialAngle=', initialAngle, ' visibleAngleStart=',visibleAngleStart,' visibleAngleEnd=',visibleAngleEnd)
if targetAngleOffset >= 0:
visibleAngleEnd -= targetAngleOffset #don't show the part of the thick wedge that would be displaced
else: #shifted the other way, towards the start, so spillover on that side needs to be avoided by not drawing it
visibleAngleStart -= targetAngleOffset
#DRAW THE TARGET RING, like the above ringRadial except displaced
#Below call is identical to ringRadial except ori
#set visibleWedge so it only highlights a single thick wedge
targetRadial= visual.RadialStim(myWin, tex=thinRingTex, color=[1,1,1],size=radius+targetRadialOffset,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge=[visibleAngleStart,visibleAngleEnd],
ori = initialAngle+targetAngleOffset, #Always zero in the new version where the task is to judge the radial offset of the blue thin wedge
mask=radialMaskTarget, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#MAKE A COPY of the thick red ring to draw over undisplaced blue
ringRadialThickWedgesCopy= visual.RadialStim(myWin, tex=ringTex, color=[1,1,1],size=radius,#ringTex is the actual colored pattern. radial grating used to make it an annulus
visibleWedge= (visibleAngleStart,visibleAngleEnd),
ori=initialAngle,
mask=radialMask, # this is a 1-D mask masking the centre, to create an annulus
radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
#Draw lines (alternative target)
lines=[]
#calculate estimated eccentricity that grating target would be at, if were using grating targets
#Find the center of the ones in radialMaskTarget, multiply by grating radius
oneIndices = np.where(radialMaskTarget==1)[0]
oneIndexMean = np.mean(oneIndices)
proportnOfRadius = oneIndexMean / len(radialMaskTarget)
proportnOfRadius += 0.5 * 1/len(radialMaskTarget) #Because grating mask doesn't work with centering, rather it's the beginning or something so need to add to get to center of wedge
eccentricity = proportnOfRadius* radius
eccentricity = eccentricity / 1.97 #Don't know why need to divide by almost 2
#print("oneIndexMean = ", oneIndexMean, "proportnOfRadius = ", proportnOfRadius, "eccentricity = ", eccentricity)
#Calculate appropriate line width in deg
wedgeThicknessFraction = len( np.where(radialMask)[0] ) * 1.0 / len(radialMask)
wedgeThickness = wedgeThicknessFraction*radius/2
targeti = targetCorrectedForRingReversal % numObjects # (targetCorrectedForRingReversal-1) % numObjects #dont know why have to subtract 1. Then have to mod numObjects so negative number gets turned into positive
targetFillColors = np.array([[.9,.9,.9],[-.8,-.8,-.8]]) # [-.3,-.3,-.3]
if sys.version_info[0] == 3: #python3
rangeOverObjects = range(0,numObjects)
else: #python2
rangeOverObjects = xrange(0,numObjects)
for i in rangeOverObjects:
lineHeight = wedgeThickness * 1.0# *1.0
lineWidth = lineHeight / 4 #divided by 10 makes it really small alex size, with div 4 being same as E2
angleDeg = initialAngle
angleDeg+= (visibleAngleStart+visibleAngleEnd)/2 #because when gratings are drawn, there's this additional offset for which bit of the grating is visible
angleDeg += i/numObjects*360
tangentialOrientation = i/numObjects*360
if __name__ != "__main__": #not self-test
halfAngle = 360/numObjects/2 #For some reason target is offset by half the distance between two objects, even though that doesn't happen in helpersAOH self-test
tangentialOrientation += halfAngle
x = cos( gratingAngleToEuclidean(angleDeg)*pi/180 ) * eccentricity
y = sin( gratingAngleToEuclidean(angleDeg)*pi/180 ) * eccentricity
lineColor = targetFillColors[0]
if i == targeti:
#print("line targeti=", targeti, " angleDeg=",angleDeg, "Euclidean angle=",gratingAngleToEuclidean(angleDeg) )
orientation = tangentialOrientation
if targetRadialOffset<0: #it's always one of two values, a negative one and a positive one
#orientation = tangentialOrientation + 90
lineColor = targetFillColors[1] #opposite color
else:
#orientation = tangentialOrientation + random.randint(0,1)*90
lineColor = targetFillColors[ random.randint(0,1) ]
#if orientation==tangentialOrientation: #make bigger because harder to see
# lineHeight *= 1.4 #for tangential, make longer
#else: lineHeight *=.8
#print("Drawing line ",i," at x=",x, " y=", y, "targetCorrectedForRingReversal=", targetCorrectedForRingReversal )
#thisLine = visual.Rect(myWin, width=lineWidth, height=lineHeight, pos=(x,y), ori=orientation, fillColor=lineColor, lineColor=None, autoLog=autoLogging)
thisLine = visual.Circle(myWin, radius=lineWidth, pos=(x,y), fillColor=lineColor, lineColor=None, autoLog=autoLogging)
lines.append(thisLine)
#CREATING CUE TEXTURE
#Both inner and outer cue arcs can be drawn in one go via a radial mask
#use visibleWedge so it only highlights a single thick wedge
start = 0 #identify starting texture position for this segment
#start:end is meant to be entire segment, but the flanks, patchFlank, are drawn in bgColor
#But remember this is a texture so maybe the first patchFlankSize portion is meant to be bg
start = patchFlankSize
end = start + segmentSizeTexture - 2*patchFlankSize
start = int( round(start) )
end = int( round(end) )
decoyTex[:, start:end, :] = cueColor[:]
#Because I'm only showing a tiny sliver via visibleAngle, can color the whole thing
cueTex[:, :, :] = cueColor[:]
# start = 0 #identify starting texture position for this segment
# start = int( round( start+patchFlankSize ) )
# end = int( round(start + segmentSizeTexture - patchFlankSize) )#don't round until after do addition, otherwise can fall short
# cueTex[:, start:end, :] = cueColor[:]
# cueTex[:, :, :] = cueColor[:]
#draw cue
visibleAngleStart = 0; visibleAngleEnd=360
if objToCue>=0:
objToCueCorrectdForRingReversal = objToCue #numObjects-1 - objToCue #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
kludgeCueThickenToMatchDecoy = 3
visibleAngleStart = objToCueCorrectdForRingReversal*segmentAngle + (segmentAngle-patchAngleThick)/2 - kludgeCueThickenToMatchDecoy
visibleAngleEnd = visibleAngleStart + patchAngleThick + kludgeCueThickenToMatchDecoy + 2
#print('objToCueCorrectdForRingReversal = ',objToCueCorrectdForRingReversal,' visibleAngleStart=',visibleAngleStart,' visibleAngleEnd=',visibleAngleEnd)
#decoyRing is optional ring to show a precue around all object positions, to eventually be replaced by a ring around only the target object
decoyRing = visual.RadialStim(myWin, tex=decoyTex, color=[1,1,1],size=radius, #cueTexInner is white. Only one sector of it shown by mask
ori = initialAngle,
mask = cueRadialMask, radialCycles=0, angularCycles=numObjects,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
cueRing = visual.RadialStim(myWin, tex=cueTex, color=[1,1,1],size=radius, #cueTexInner is white. Only one sector of it shown by mask
visibleWedge=[visibleAngleStart,visibleAngleEnd],
ori = initialAngle,
mask = cueRadialMask, radialCycles=0, angularCycles=1, #only one cycle because no pattern actually repeats- trying to highlight only one sector
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging)
return ringRadialThickWedges,ringRadialThickWedgesCopy,ringRadialThinWedges,targetRadial,cueRing,lines, decoyRing
######### End constructThickThinWedgeRingsTargetAndCue ###########################################################
########################################################### ###########################################################
if __name__ == "__main__": #do self-tests
from psychopy import *
from psychopy import monitors, logging
monitorwidth = 38.5 #28.5 #monitor width in centimeters
viewdist = 57.; #cm
mon = monitors.Monitor("testMonitor",width=monitorwidth, distance=viewdist) #fetch the most recent calib for this monitor
bgColor = [0,0,0] # [-1,-1,-1]
allowGUI = True; units='deg'; fullscr=0; scrn=0; waitBlank=False
#mon.setSizePix( (widthPix,heightPix) )
widthPix = 800; heightPix = 600
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
widthPix = myWin.size[0]; heightPix = myWin.size[1]
#Task will be to judge which thick wedge has the thin wedge offset within it
numObjects = 8
initialAngle = 0 #random.random()*360
gratingTexPix= 1024
objToCue=0
radius = 25.
visibleWedge = [0,360]
patchAngleThickWedges = 360/numObjects/2
thickWedgeColor = [1,-1,-1]
thinWedgeColor=[0,0,1]
cueColor=[0,1,1]
radialMask = np.array( [0,0,0,0,1,0,0,0,0] ) #This is for the larger wedge that the TargetSliver is embedded in
radialMaskThinWedge = np.array( [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] ) #This is the sliver that's offset relative to the larger wedge, that you have to judge the offset of
wedgeRadiusFraction = np.where(radialMask)[0][0]*1.0 / len(radialMask)
print('wedgeRadiusFraction = ',wedgeRadiusFraction)
wedgeThicknessFraction = len( np.where(radialMask)[0] )*1.0 / len(radialMask)
print('wedgeThickness = ',wedgeThicknessFraction*radius)
wedgeCenterFraction = wedgeRadiusFraction + wedgeThicknessFraction/2.
targetSliverRadiusFraction = np.where(radialMaskThinWedge)[0][0]*1.0 / len(radialMaskThinWedge)
print('targetSliverRadiusFraction = ',targetSliverRadiusFraction)
targetSliverThicknessFraction = len( np.where(radialMaskThinWedge)[0] )*1.0 / len(radialMaskThinWedge)
targetSliverCenterFraction = targetSliverRadiusFraction + targetSliverThicknessFraction/2.
print('targetSliverThickness = ',targetSliverThicknessFraction*radius, ' targetSliverCenterFraction=',targetSliverCenterFraction)
#distance of cue arc
desiredArcDistanceFractionRadius = .23
cueInnerArcDesiredFraction = wedgeCenterFraction - desiredArcDistanceFractionRadius
cueOuterArcDesiredFraction = wedgeCenterFraction + desiredArcDistanceFractionRadius
if cueOuterArcDesiredFraction > 1:
msg='Can"t start outer arc at fraction='+str(cueOuterArcDesiredFraction)
logging.error(msg); print(msg)
fractionResolution = .02 #Quantisation of possible positions of cue arc
binsNeeded = round(1.0 / fractionResolution)
cueRadialMask = np.zeros( binsNeeded )
#For the cueRadialMask, want everything zero except just inside and outside of the wedges.
innerArcCenterPos = int( round( binsNeeded*cueInnerArcDesiredFraction ) )
outerArcCenterPos = int( round( binsNeeded*cueOuterArcDesiredFraction ) )
cueRadialMask[ innerArcCenterPos-5:innerArcCenterPos+5 ] = 1
cueRadialMask[ outerArcCenterPos-1:outerArcCenterPos+1 ] = 1
print('cueInnerArcDesiredFraction = ',cueInnerArcDesiredFraction, ' actual = ', innerArcCenterPos*1.0/len(cueRadialMask) )
print('cueOuterArcDesiredFraction = ',cueOuterArcDesiredFraction, ' actual = ', outerArcCenterPos*1.0/len(cueRadialMask) )
targetAngleOffset = 0; targetRadialOffset = -1
thickWedgesRing,thickWedgesRingCopy, thinWedgesRing, targetRing, cueRing, lines, decoyRing = \
constructThickThinWedgeRingsTargetAndCue(myWin,initialAngle,radius,radialMask,radialMaskThinWedge,cueRadialMask,visibleWedge,numObjects,
patchAngleThickWedges,patchAngleThickWedges,bgColor,thickWedgeColor,thinWedgeColor,targetAngleOffset,targetRadialOffset,
gratingTexPix,cueColor,objToCue,ppLog=logging)
decoy = True
keepGoing = True
while keepGoing:
thickWedgesRing.draw()
thinWedgesRing.draw()
if decoy:
decoyRing.draw()
else:
cueRing.draw()
#Draw thin wedges at same time as thick wedges. But when time to draw target, draw over old position of target thin wedge and draw displaced version
#Now program the cue arcs and the target-displaced ring
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape','q']:
keepGoing = False
respcount = 1
else: #key in [
print('key =', key)
keepGoing = True #draw target superposed
while keepGoing:
#The thickWedgesRing, typically red, are drawn as a radial grating that occupies all 360 deg circular, with a texture to mask out everything else to create a ring
#The thinWedgesRing, typically blue, are centered in the red and one of these wedges will be later displaced to create a target.
#The targetRing is the displaced blue wedge. Actually a full circular radial grating, but visibleWedge set to subtend only the part where the target is.
#The thickWedgesRingCopy is to draw over the old, undisplaced blue wedge, only in the target area. It is thus a copy of the thickWedgesRing,
# with visibleWedge set to show only the target part
#The cueRing is two white arcs to bring attention to the target area.
thickWedgesRing.draw() #Draw red thick wedges
thinWedgesRing.draw() #Draw thin blue wedge centered in thick red wedges
#When time to draw target, draw over old position of target thin wedge and draw displaced version
thickWedgesRingCopy.draw()
targetRing.draw() #this is the particular blue patch offset. And drawing the rest in red, so that the undisplaced doesn't show through.
for line in lines:
line.draw()
cueRing.draw()
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape','q']:
keepGoing = False
respcount = 1
else: #key in [
print('key =', key)
| alexholcombe/movingCue | helpersAOHtargetFinalCueLocatn.py | Python | mit | 32,987 |
#!/usr/bin/env python
"""Print the contents of the file named by the first command-line argument."""
import sys
with open(sys.argv[1], 'r') as handle:
    contents = handle.read()
    print(contents)
| TheShellLand/pies | v3/Libraries/sys/Open file.py | Python | mit | 102 |
'''
Created on Feb 4, 2016
Decoding tables taken from https://github.com/typiconman/Perl-Lingua-CU
@author: mike kroutikov
'''
from __future__ import print_function, unicode_literals
import codecs
def ucs_decode(input_, errors='strict'):
    """Decode UCS-encoded bytes via the module-level ``decoding_table``.

    Returns the ``(decoded_text, length_consumed)`` pair expected by the
    codecs machinery.  *errors* is accepted for interface compatibility but
    unused: every byte value 0-255 has a table entry.
    """
    decoded = ''.join(map(decoding_table.__getitem__, input_))
    return decoded, len(input_)
def ucs_encode(input_, errors):
    """Encoding text back to UCS is not supported."""
    raise NotImplementedError('encoding to UCS is not implemented')
### Decoding Table
decoding_table = (
'\x00',
'\x01',
'\x02',
'\x03',
'\x04',
'\x05',
'\x06',
'\x07',
'\x08',
'\t',
'\n',
'\x0b',
'\x0c',
'\r',
'\x0e',
'\x0f',
'\x10',
'\x11',
'\x12',
'\x13',
'\x14',
'\x15',
'\x16',
'\x17',
'\x18',
'\x19',
'\x1a',
'\x1b',
'\x1c',
'\x1d',
'\x1e',
'\x1f',
' ',
'!',
'"',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0483',
"'",
'(',
')',
'\ua673',
'\u2de1\u0487', # combining VE
',',
'-',
'.',
'/',
'\u043e\u0301',
'\u0301',
'\u0300',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0311', # combining inverted breve
'\u0483', # titlo
'\u033e', # combining vertical tilde
'\u0436\u0483', # zhe with titlo above
':',
';',
'\u2def', # combining HA
'\u2de9\u0487', # combining EN
'\u2dec\u0487', # combining ER
'\u2df1\u0487', # combining CHE
'\u0300',
'\u0430\u0300', # latin A maps to AZ with grave accent
'\u0463\u0311', # latin B maps to Yat' with inverted breve
'\u2ded\u0487', # combining ES
'\u0434\u2ded\u0487',
'\u0435\u0300', # latin E maps to e with grave accent
'\u0472', # F maps to THETA
'\u0433\u0483', # G maps to ge with TITLO
'\u0461\u0301', # latin H maps to omega with acute accent
'\u0406',
'\u0456\u0300',
'\ua656\u0486', # YA with psili
'\u043b\u2de3', # el with cobining de
'\u0476', # capital IZHITSA with kendema
'\u047a\u0486', # capital WIDE ON with psili
'\u047a', # just capital WIDE ON
'\u0470', # capital PSI
'\u047c', # capital omega with great apostrophe
'\u0440\u0483', # lowercase re with titlo
'\u0467\u0300', # lowercase small yus with grave
'\u047e', # capital OT
'\u041e\u0443', # diagraph capital UK
'\u0474', # capital IZHITSA
'\u0460', # capital OMEGA
'\u046e', # capital XI
'\ua64b\u0300', # monograph uk with grave
'\u0466', # capital SMALL YUS
'[',
'\u0483', # yet another titlo
']',
'\u0311', # combining inverted breve
'\u033e', # yet another yerik
'`',
'\u0430\u0301', # latin A maps to AZ with acute accent
'\u2dea\u0487', # combining ON
'\u2ded\u0487', # combining ES
'\u2de3', # combining DE
'\u0435\u0301', # latin E maps to e with acute accent
'\u0473', # lowercase theta
'\u2de2\u0487', # combining ge
'\u044b\u0301', # ery with acute accent
'\u0456',
'\u0456\u0301', # i with acute accent
'\ua657\u0486', # iotaed a with psili
'\u043b\u0483', # el with titlo
'\u0477', # izhitsa with izhe titlo
'\u047b\u0486', # wide on with psili
'\u047b', # wide on
'\u0471', # lowercase psi
'\u047d', # lowercase omega with great apostrophe
'\u0440\u2ded\u0487', # lowercase er with combining es
'\u0467\u0301', # lowercase small yus with acute accent
'\u047f', # lowercase ot
'\u1c82\u0443', # diagraph uk
'\u0475', # lowercase izhitsa
'\u0461', # lowercase omega
'\u046f', # lowercase xi
'\ua64b\u0301', # monograph uk with acute accent
'\u0467', # lowercase small yus
'\ua64b\u0311', # monograph uk with inverted breve
'\u0467\u0486\u0300', # lowercase small yus with apostroph
'\u0438\u0483', # the numeral eight
'\u0301', # yet another acute accent
'\x7f',
'\u0475\u0301', # lowercase izhitsa with acute
'\u0410\u0486\u0301', # uppercase A with psili and acute
'\u201a',
'\u0430\u0486\u0301', # lowercase A with psili and acute
'\u201e',
'\u046f\u0483', # the numberal sixty
'\u0430\u0311', # lowercase a with inverted breve
'\u0456\u0311', # lowercase i with inverted breve
'\u2de5', # combining ze
'\u0467\u0311', # lowercase small yus with inverted breve
'\u0466\u0486', # upercase small yus with psili
'\u0456\u0483', # the numeral ten
'\u0460\u0486', # capital OMEGA with psili
'\u041e\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua656\u0486\u0301', # uppercase Iotated A with apostroph
'\u047a\u0486\u0301', # uppercase Round O with apostroph
'\u0475\u2de2\u0487', # lowercase izhitsa with combining ge
'\u2018',
'\u2019',
'\u201c',
'\u201d',
'\u2de4', # combining zhe
'\u2013',
'\u2014',
'\ufffe',
'\u0442\u0483',
'\u0467\u0486', # lowercase small yus with psili
'\u0475\u0311', # izhitsa with inverted breve
'\u0461\u0486', # lowercase omega with psili
'\u1c82\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua657\u0486\u0301', # lowercase iotaed a with apostroph
'\u047b\u0486\u0301', # lowercase Round O with apostroph
'\xa0',
'\u041e\u0443\u0486', # Capital Diagraph Uk with psili
'\u1c82\u0443\u0486', # lowercase of the above
'\u0406\u0486\u0301', # Uppercase I with apostroph
'\u0482', # cyrillic thousands sign
'\u0410\u0486', # capital A with psili
'\u0445\u0483', # lowercase kha with titlo
'\u0447\u0483', # the numeral ninety
'\u0463\u0300', # lowecase yat with grave accent
'\u0441\u0483', # the numeral two hundred
'\u0404',
'\xab',
'\xac',
'\xad',
'\u0440\u2de3', # lowercase er with dobro titlo
'\u0406\u0486',
'\ua67e', # kavyka
'\ua657\u0486\u0300',
'\u0406',
'\u0456\u0308',
'\u0430\u0486',
'\u0443', # small letter u (why encoded at the micro sign?!)
'\xb6',
'\xb7',
'\u0463\u0301', # lowercase yat with acute accent
'\u0430\u0483', # the numeral one
'\u0454', # wide E
'\xbb',
'\u0456\u0486\u0301', # lowercase i with apostroph
'\u0405',
'\u0455',
'\u0456\u0486', # lowercase i with psili
'\u0410',
'\u0411',
'\u0412',
'\u0413',
'\u0414',
'\u0415',
'\u0416',
'\u0417',
'\u0418',
'\u0419',
'\u041a',
'\u041b',
'\u041c',
'\u041d',
'\u041e',
'\u041f',
'\u0420',
'\u0421',
'\u0422',
'\ua64a',
'\u0424',
'\u0425',
'\u0426',
'\u0427',
'\u0428',
'\u0429',
'\u042a',
'\u042b',
'\u042c',
'\u0462', # capital yat
'\u042e',
'\ua656', # capital Iotified A
'\u0430',
'\u0431',
'\u0432',
'\u0433',
'\u0434',
'\u0435',
'\u0436',
'\u0437',
'\u0438',
'\u0439',
'\u043a',
'\u043b',
'\u043c',
'\u043d',
'\u043e',
'\u043f',
'\u0440',
'\u0441',
'\u0442',
'\ua64b', # monograph Uk (why?!)
'\u0444',
'\u0445',
'\u0446',
'\u0447',
'\u0448',
'\u0449',
'\u044a',
'\u044b',
'\u044c',
'\u0463', # lowercase yat
'\u044e',
'\ua657', # iotaed a
)
def _build_decoding_table(fname):
    """Build ``(decode_table, comments)`` from Perl's ``ucsequivs`` file.

    Starts from cp1251's 256-entry table and overlays every entry parsed
    from *fname*; ``comments[i]`` keeps the trailing Perl comment (or None).
    """
    from encodings import cp1251
    table = list(cp1251.decoding_table)
    notes = [None] * 256
    with codecs.open(fname, 'r', 'utf-8') as stream:
        for raw_line in stream:
            raw_line = raw_line.strip()
            # skip blanks, the 'use utf8;' pragma and Perl comments
            if not raw_line or raw_line == 'use utf8;' or raw_line.startswith('#'):
                continue
            index, chars, note = parse_perl_dictionary_entry(raw_line)
            table[index] = chars
            notes[index] = note
    return table, notes
def parse_perl_dictionary_entry(line):
    """Parse one ``'<char>' => chr(0x..) . chr(0x..), # comment`` line.

    Returns ``(key, chars, comment)`` where *key* is the cp1251 byte value,
    *chars* the decoded replacement string and *comment* the trailing
    comment text (or ``None`` when absent).
    """
    key_part, value_part = line.split('=>')
    key_part = key_part.strip().strip("'")
    if key_part == '\\\\':
        key_part = '\\'
    key_bytes = key_part.encode('cp1251')
    assert len(key_bytes) == 1, key_bytes
    key = int(key_bytes[0])
    pieces = value_part.strip().split('#', 1)
    comment = pieces[1].strip() if len(pieces) == 2 else None
    expr = pieces[0].strip().rstrip(',')
    calls = [piece.strip() for piece in expr.split('.')]
    assert min(call.startswith('chr(') and call.endswith(')') for call in calls)
    codepoints = [int(call[4:-1], 0) for call in calls]
    chars = ''.join(chr(cp) for cp in codepoints)
    return key, chars, comment
if __name__ == '__main__':
    '''Code that generates "decoding_table" from Perl ucs encoding table.
    1. Download Perl UCS encoding table from:
    https://raw.githubusercontent.com/typiconman/Perl-Lingua-CU/master/lib/Lingua/CU/Scripts/ucsequivs
    2. Put it into current directory.
    3. Run this code to generate Python array "decoding_table"
    '''
    table, notes = _build_decoding_table('ucsequivs')
    print('decoding_table = (')
    for char, note in zip(table, notes):
        tail = '' if note is None else ' # ' + note
        escaped = char.encode('unicode-escape').decode()
        if char == "'":
            # a bare single quote must be emitted double-quoted to stay
            # valid Python source
            print('\t"%s",%s' % (escaped, tail))
        else:
            print("\t'%s',%s" % (escaped, tail))
print(')') | pgmmpk/cslavonic | cslavonic/ucs_decode.py | Python | mit | 9,501 |
from . import ast
from .pystates import symbols as syms
from .grammar.sourcefile import SourceFile
import token
import six
import re
class ASTError(Exception):
    """Raised when a parse tree cannot be converted into an AST."""
class ASTMeta(type):
    """Metaclass that builds a ``handlers`` dispatch table.

    Every class attribute named ``handle_<symbol>`` is registered in the
    class' ``handlers`` dict, keyed by the matching grammar symbol from
    ``syms``, so instances can dispatch on parse-tree node types.
    """
    def __new__(cls, name, bases, attrs):
        dispatch = {}
        attrs['handlers'] = dispatch
        newcls = type.__new__(cls, name, bases, attrs)
        prefix = 'handle_'
        for attr_name in list(attrs):
            if attr_name.startswith(prefix):
                symbol = attr_name[len(prefix):]
                dispatch[syms[symbol]] = getattr(newcls, attr_name)
        return newcls
operator_map = {
'+': ast.Add,
'+=': ast.Add,
'-': ast.Sub,
'-=': ast.Sub,
'*': ast.Mult,
'*=': ast.Mult,
'/': ast.Div,
'/=': ast.Div,
'%': ast.Mod,
'%=': ast.Mod,
'**': ast.Pow,
'**=': ast.Pow,
'<<': ast.LShift,
'<<=': ast.LShift,
'>>': ast.RShift,
'>>=': ast.RShift,
'|': ast.BitOr,
'|=': ast.BitOr,
'^': ast.BitXor,
'^=': ast.BitXor,
'&': ast.BitAnd,
'&=': ast.BitAnd,
'//': ast.FloorDiv,
'//=': ast.FloorDiv,
}
compare_map = {
'==': ast.Eq,
'!=': ast.NotEq,
'<': ast.Lt,
'<=': ast.LtE,
'>': ast.Gt,
'>=': ast.GtE,
'is': ast.Is,
'is not': ast.IsNot,
'in': ast.In,
'not in': ast.NotIn,
}
xdigits = re.compile(r'^[0-9a-z]{2}$', re.IGNORECASE)
@six.add_metaclass(ASTMeta)
class ASTBuilder(object):
def __init__(self, src):
if not isinstance(src, SourceFile):
raise Exception('invalid sourcefile')
self.src = src
self.root = src.parse_tree.root
self.ast = self.build()
def syntax_error(self, msg, node):
return SyntaxError(msg, (self.src.name, node.start[0], node.start[1],
self.src.get_line(node.start[0])))
def build(self):
n = self.root
if n == syms.single_input:
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
if n[0] == token.NEWLINE:
return ast.Interactive([])
return ast.Interactive(self.handle(n[0]))
elif n == syms.file_input:
# file_input: (NEWLINE | stmt)* ENDMARKER
stmts = []
for stmt in n.filter(syms.stmt):
stmts.extend(self.handle(stmt[0]))
return ast.Module(stmts)
elif n == syms.eval_input:
# eval_input: testlist NEWLINE* ENDMARKER
return ast.Expression(self.handle_testlist(n[0]))
raise ASTError('invalid root node')
def handle(self, node):
handler = self.handlers.get(node.type, None)
if handler is None:
raise ASTError('invalid node: %r', node)
return handler(self, node)
def handle_stmt(self, stmt):
# stmt: simple_stmt | compound_stmt
if stmt[0] == syms.simple_stmt:
return self.handle_simple_stmt(stmt[0])
return [self.handle(stmt[0][0])]
def handle_simple_stmt(self, simple_stmt):
# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
# small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
# import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
stmts = []
for small_stmt in simple_stmt.filter(syms.small_stmt):
stmts.append(self.handle(small_stmt[0]))
return stmts
def handle_compound_stmt(self, compound_stmt):
# compound_stmt: (if_stmt | while_stmt | for_stmt |
# try_stmt | with_stmt | funcdef)
return [self.handle(compound_stmt[0])]
def handle_testlist(self, testlist):
# testlist: test (',' test)* [',']
if len(testlist) == 1:
return self.handle_test(testlist[0])
exprs = []
for test in testlist.filter(syms.test):
exprs.append(self.handle_test(test))
return ast.Tuple(exprs, ast.Load, *testlist.start)
def handle_test(self, test):
# test: or_test ['if' or_test 'else' test] | lambdef
if len(test) == 1:
if test[0] == syms.lambdef:
return self.handle_lambdef(test[0])
return self.handle_or_test(test[0])
body = self.handle_or_test(test[0])
te = self.handle_or_test(test[2])
orelse = self.handle_test(test[4])
return ast.IfExp(te, body, orelse, *test.start)
def handle_or_test(self, or_test):
# or_test: and_test ('or' and_test)*
if len(or_test) == 1:
return self.handle_and_test(or_test[0])
return ast.BoolOp(ast.Or,
[self.handle_and_test(x) for x in or_test.filter(syms.and_test)],
*or_test.start)
def handle_and_test(self, and_test):
#and_test: not_test ('and' not_test)*
if len(and_test) == 1:
return self.handle_not_test(and_test[0])
return ast.BoolOp(ast.And,
[self.handle_not_test(x) for x in and_test.filter(syms.not_test)],
*and_test.start)
def handle_not_test(self, node):
# not_test: 'not' not_test | comparison
if len(node) == 2:
return ast.UnaryOp(
ast.Not, self.handle_not_test(node[1]), *node.start)
# comparison: expr (comp_op expr)*
# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
node = node[0]
expr = self.handle_expr(node[0])
if len(node) == 1:
return expr
operators = []
operands = []
for i in range(1, len(node), 2):
if len(node[i]) == 1:
op = node[i][0].val
else:
op = '%s %s' % (node[i][0].val, node[i][1].val)
operators.append(compare_map[op])
operands.append(self.handle_expr(node[i + 1]))
return ast.Compare(expr, operators, operands, *node.start)
def handle_lambdef(self, node):
# lambdef: 'lambda' [varargslist] ':' test
if len(node) == 3:
args = ast.arguments(args=[], vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=None, kwargannotation=None,
defaults=[], kw_defaults=[])
else:
args = self.handle_varargslist(node[1])
return ast.Lambda(args, self.handle_test(node[-1]), *node.start)
def handle_varargslist(self, node):
# typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [','
# ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
# | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
# tfpdef: NAME [':' test]
# varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
# ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
# | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
# vfpdef: NAME
if node[0].val == '**':
kwarg = node[1][0].val
kwargannotation = node[1][2].val if len(node[1]) == 3 else None
return ast.arguments(args=[], vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=kwarg, kwargannotation=kwargannotation,
defaults=[], kw_defaults=[])
elif node[0].val == '*':
vararg, i = node[1][0].val, 3
varargannotation = node[1][2].val if len(node[1]) == 3 else None
kwonlyargs = []
kw_defaults = []
while i < len(node) and node[i].val != '**':
arg = ast.arg(node[i][0].val, None)
if len(node[i]) == 3:
arg.annotation = node[i][2].val
kwonlyargs.append(arg)
if node[i + 1].val == '=':
kw_defaults.append(self.handle_test(node[i + 2]))
i += 4
else:
i += 2
if i < len(node) and node[i].val == '**':
kwarg = node[i + 1][0].val
kwargannotation = node[i + 1][2] if len(node[i + 1]) == 3 else None
else:
kwarg, kwargannotation = None, None
return ast.arguments(args=[], vararg=vararg,
varargannotation=varargannotation, kwonlyargs=kwonlyargs,
kwarg=kwarg, kwargannotation=kwargannotation,
defaults=[], kw_defaults=kw_defaults)
i = 0
args = []
defaults = []
while i < len(node) and node[i] != token.OP:
arg = ast.arg(node[i][0].val, None)
if len(node[i]) == 3:
arg.annotation = node[i][2].val
args.append(arg)
if i + 1 < len(node) and node[i + 1].val == '=':
defaults.append(self.handle_test(node[i + 2]))
i += 4
elif len(defaults) > 0:
# TODO: get line
raise self.syntax_error(
'non-default argument follows default argument', node)
else:
i += 2
if i < len(node):
argument = self.handle_varargslist(node.subs[i:])
argument.args = args
argument.defaults = defaults
return argument
return ast.arguments(args=args, vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=None, kwargannotation=None, defaults=defaults,
kw_defaults=[])
handle_typedargslist = handle_varargslist
def handle_expr(self, node):
# expr: xor_expr ('|' xor_expr)*
# xor_expr: and_expr ('^' and_expr)*
# and_expr: shift_expr ('&' shift_expr)*
# shift_expr: arith_expr (('<<'|'>>') arith_expr)*
# arith_expr: term (('+'|'-') term)*
# term: factor (('*'|'/'|'%'|'//') factor)*
if node == syms.factor:
return self.handle_factor(node)
if len(node) == 1:
return self.handle_expr(node[0])
binop = ast.BinOp(
self.handle_expr(node[0]),
operator_map[node[1].val],
self.handle_expr(node[2]),
*node.start)
for i in range(3, len(node), 2):
binop = ast.BinOp(binop, operator_map[node[i].val],
self.handle_expr(node[i + 1]), *node.start)
return binop
def handle_factor(self, node):
# factor: ('+'|'-'|'~') factor | power
if len(node) == 1:
return self.handle_power(node[0])
uop = node[0].val
if uop == '+':
uop = ast.UAdd
elif uop == '-':
uop = ast.USub
else:
uop = ast.Invert
return ast.UnaryOp(uop, self.handle_factor(node[1]), *node.start)
def handle_power(self, node):
# power: atom trailer* ['**' factor]
atom = self.handle_atom(node[0])
if len(node) == 1:
return atom
for n in node.subs[1:]:
if n != syms.trailer:
break
atom = self.get_trailer(n, atom)
if node[-1] == syms.factor:
return ast.BinOp(
atom, ast.Pow, self.handle_factor(node[-1]), *node.start)
return atom
def get_trailer(self, node, atom):
# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
v = node[0].val
if v == '.':
return ast.Attribute(atom, node[1].val, ast.Load, *node.start)
elif v == '(':
if len(node) == 2:
return ast.Call(atom, [], [], None, None, *node.start)
args, keywords, starargs, kwargs = self.get_arglist(node[1])
return ast.Call(atom, args, keywords, starargs, kwargs, *node.start)
return self.get_subscriptlist(atom, node[1])
def get_subscriptlist(self, left, node):
# subscriptlist: subscript (',' subscript)* [',']
# subscript: test | [test] ':' [test] [sliceop]
# sliceop: ':' [test]
if len(node) == 1:
sl = self.get_slice(node[0])
return ast.Subscript(left, sl, ast.Load, *node.start)
slices = []
for n in node.filter(syms.subscript):
slices.append(self.get_slice(n))
extsl = ast.ExtSlice(slices)
return ast.Subscript(left, extsl, ast.Load, *node.start)
def get_slice(self, node):
# subscript: test | [test] ':' [test] [sliceop]
# sliceop: ':' [test]
if len(node) == 1:
if node[0] == syms.test:
return ast.Index(self.handle_test(node[0]))
return ast.Slice(None, None, None)
if node[0] == syms.test:
lower = self.handle_test(node[0])
next = 2
else:
lower, next = None, 1
if next < len(node):
upper, next = self.handle_test(node[next]), next + 1
step = None
if next < len(node):
sliceop = node[next]
if len(sliceop) == 2:
step = self.handle_test(sliceop[1])
return ast.Slice(lower, upper, step)
return ast.Slice(lower, None, None)
    def get_arglist(self, node):
        """Split a call's arglist into ``(args, keywords, starargs, kwargs)``.

        Grammar::

            arglist: (argument ',')* (argument [',']
                                     |'*' test (',' argument)* [',' '**' test]
                                     |'**' test)
        """
        args, keywords, starargs, kwargs = [], [], None, None
        i = 0
        # leading plain/keyword arguments (step 2 to skip the commas)
        while i < len(node) and node[i] == syms.argument:
            arg = self.handle_argument(node[i])
            if isinstance(arg, ast.keyword):
                keywords.append(arg)
            elif len(keywords) == 0:
                args.append(arg)
            else:
                # positional argument after a keyword one is illegal
                raise self.syntax_error('non-keyword arg after keyword arg', node)
            i += 2
        if i >= len(node):
            pass
        elif node[i].val == '*':
            # '*' test, optionally followed by keyword args and '**' test
            starargs = self.handle_test(node[i + 1])
            i += 3
            while i < len(node) and node[i] == syms.argument:
                kw = self.handle_argument(node[i])
                if not isinstance(kw, ast.keyword):
                    raise self.syntax_error(
                        'only named arguments may follow *expression', node)
                keywords.append(kw)
                i += 2
            if i < len(node):
                # trailing '**' test
                kwargs = self.handle_test(node[i + 1])
        else:
            # bare '**' test
            kwargs = self.handle_test(node[i + 1])
        return args, keywords, starargs, kwargs
def handle_argument(self, node):
# argument: test [comp_for] | test '=' test
if len(node) == 1:
return self.handle_test(node[0])
elif len(node) == 3:
k = self.handle_test(node[0])
if not isinstance(k, ast.Name):
raise self.syntax_error(
'keyword must be a NAME', *node[0].start)
v = self.handle_test(node[2])
return ast.keyword(k.id, v)
return ast.GeneratorExp(self.handle_test(node[0]),
self.get_comp_for(node[1]), *node.start)
    def get_comp_for(self, node):
        """Build the list of ``ast.comprehension`` clauses for a comp_for.

        Grammar::

            comp_for: 'for' exprlist 'in' or_test [comp_iter]
            comp_iter: comp_for | comp_if

        Trailing ``comp_if`` conditions are attached to this clause's
        ``ifs``; any further ``comp_for`` clauses follow it in the list.
        """
        target = self.handle_exprlist(node[1])
        if not isinstance(target, ast.AssignTypes):
            raise self.syntax_error(
                'invalid assign to %s' % type(target).__name__, node[1])
        # the loop variable is written, not read
        self.loop_mark_ctx(target, ast.Store)
        compfor = ast.comprehension(target, self.handle_or_test(node[3]), [])
        if len(node) == 4:
            return [compfor]
        if node[-1][0] == syms.comp_if:
            tails = self.get_comp_if(node[-1][0])
        else:
            tails = self.get_comp_for(node[-1][0])
        # leading Compare nodes in the tail are 'if' conditions of this clause
        # NOTE(review): only ast.Compare conditions are recognized here; a
        # bare-name condition (e.g. "if flag") would not be collected into
        # ifs -- verify this is guaranteed upstream.
        ifs, i = [], 0
        while i < len(tails) and isinstance(tails[i], ast.Compare):
            ifs.append(tails[i])
            i += 1
        compfor.ifs = ifs
        return [compfor] + tails[i:]
def get_comp_if(self, node):
# comp_if: 'if' test_nocond [comp_iter]
# comp_iter: comp_for | comp_if
comp = self.test_nocond(node[1])
if len(node) == 3:
if node[2][0] == syms.comp_if:
subs = self.get_comp_if(node[2][0])
else:
subs = self.get_comp_for(node[2][0])
return [comp] + subs
return [comp]
def handle_exprlist(self, node):
# exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
exprs = []
for n in node.subs:
if n == syms.expr:
exprs.append(self.handle_expr(n))
elif n == syms.star_expr:
exprs.append(self.handle_star_expr(n))
if len(exprs) == 1 and node[-1] != token.OP:
return exprs[0]
return ast.Tuple(exprs, ast.Store, *node.start)
def handle_test_nocond(self, node):
# test_nocond: or_test | lambdef_nocond
# lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
if node[0] == syms.or_test:
return self.handle_or_test(node[0])
node = node[0]
if len(node) == 3:
args = ast.arguments(args=[], vararg=None, varargannotation=None,
kwonlyargs=[], kwarg=None, kwargannotation=None,
defaults=[], kw_defaults=[])
else:
args = self.handle_varargslist(node[1])
return ast.Lambda(args, self.handle_test_nocond(node[-1]), *node.start)
def handle_atom(self, node):
# atom: ('(' [yield_expr|testlist_comp] ')' |
# '[' [testlist_comp] ']' |
# '{' [dictorsetmaker] '}' |
# NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False')
n = node[0]
if n == token.NAME:
return ast.Name(n.val, ast.Load, *node.start)
elif n == token.NUMBER:
return ast.Num(eval(n.val), *node.start)
elif n.val == '...':
return ast.Ellipsis(*node.start)
elif n == token.STRING:
return self.get_string(node)
elif n.val == '(':
if len(node) == 2:
return ast.Tuple([], ast.Load, *node.start)
if node[1] == syms.yield_expr:
return self.handle_yield_expr(node[1])
return self.get_testlist_comp('(', node[1])
elif n.val == '[':
if len(node) == 2:
return ast.List([], ast.Load, *node.start)
return self.get_testlist_comp('[', node[1])
else:
if len(node) == 2:
return ast.Dict([], [], *node.start)
return self.handle_dictorsetmaker(node[1])
def get_string(self, node):
head = self.parse_string(node[0])
if len(node) == 1:
return head
t = type(head)
strs = [head]
for n in node.subs[1:]:
s = self.parse_string(n)
if not isinstance(s, t):
raise self.syntax_error(
'cannot mix bytes and nonbytes literals', node)
strs.append(s)
if t is ast.Str:
return ast.Str(''.join(strs), *node.start)
return ast.Bytes(b''.join(strs), *node.start)
def parse_string(self, node):
is_str, is_re = True, False
s = node.val
pos = 0
while True:
c = s[pos]
if c == 'b':
is_str = False
elif c == 'r':
is_re = True
elif c == 'u':
pass
else:
if s[pos] == s[pos + 1]:
s = s[pos + 3:-3]
else:
s = s[pos + 1:-1]
break
pos += 1
if is_re:
if is_str:
return ast.Str(s, *node.start)
return ast.Bytes(s, *node.start)
chars = []
pos = 0
while pos < len(s):
c = s[pos]
if c == '\\':
if pos == len(s) - 1:
raise self.syntax_error(
'EOL while scanning string literal', *node.end)
pos += 1
next = s[pos]
if next == "'":
v = "'" if is_str else ord("'")
elif next == '"':
v = '"' if is_str else ord('"')
elif next == 'b':
v = '\b' if is_str else ord('\b')
elif next == 'f':
v = '\f' if is_str else ord('\f')
elif next == 't':
v = '\t' if is_str else ord('\t')
elif next == 'n':
v = '\n' if is_str else ord('\n')
elif next == 'r':
v = '\r' if is_str else ord('\r')
elif next == 'v':
v = '\v' if is_str else ord('\v')
elif next == 'a':
v = '\a' if is_str else ord('\a')
elif next == 'x':
if pos + 2 >= len(s) - 1:
raise self.syntax_error(
'truncated \\xXX escape', node)
xs = s[pos:pos + 2]
if not xdigits.match(xs):
raise self.syntax_error('invalid \\xXX escape', node)
pos += 2
n = eval('0x' + xs)
v = chr(n) if is_str else n
elif next in '01234567':
n = int(next)
if pos + 1 < len(s) and s[pos + 1] in '01234567':
pos += 1
n = n * 8 + int(s[pos])
if pos + 1 < len(s) and s[pos + 1] in '01234567':
pos += 1
n = n * 8 + int(s[pos])
v = chr(n) if is_str else n
else:
v = '\\' if is_str else ord('\\')
chars.append(v)
else:
chars.append(c)
pos += 1
if is_str:
return ast.Str(''.join(chars), *node.start)
return ast.Bytes(bytes(chars), *node.start)
def handle_yield_expr(self, node):
# yield_expr: 'yield' [yield_arg]
# yield_arg: 'from' test | testlist
if len(node) == 1:
return ast.Yield(None, *node.start)
if len(node[1]) == 1:
testlist = self.handle_testlist(node[1][0])
return ast.Yield(testlist, *node.start)
test = self.handle_test(node[1][1])
return ast.YieldFrom(test, *node.start)
def get_testlist_comp(self, outter, node):
# testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
if node[0] == syms.test:
expr = self.handle_test(node[0])
else:
expr = self.handle_star_expr(node[0])
if len(node) == 1:
# (test|star_expr)
if outter == '(':
return expr
return ast.List([expr], ast.Load, *node.start)
if node[1] == syms.comp_for:
# (test|star_expr) comp_for
generators = self.get_comp_for(node[1])
return ast.GeneratorExp(expr, generators, *node.start)
# (test|star_expr) (',' (test|star_expr))* [',']
i = 2
elts = [expr]
while i < len(node):
if node[i] == syms.test:
elts.append(self.handle_test(node[i]))
else:
elts.append(self.handle_star_expr(node[i]))
i += 2
if outter == '(':
return ast.Tuple(elts, ast.Load, *node.start)
return ast.List(elts, ast.Load, *node.start)
def handle_dictorsetmaker(self, node):
# dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
if len(node) > 1 and node[1] == token.OP:
if node[3] == syms.comp_for:
# test ':' test comp_for
k = self.handle_test(node[0])
v = self.handle_test(node[2])
generators = self.get_comp_for(node[3])
return ast.DictComp(k, v, generators, *node.start)
# test ':' test (',' test ':' test)* [',']
i = 0
keys, values = [], []
while i < len(node):
keys.append(self.handle_test(node[i]))
values.append(self.handle_test(node[i + 2]))
i += 4
return ast.Dict(keys, values, *node.start)
# (test (comp_for | (',' test)* [',']))
if len(node) > 1 and node[1] == syms.comp_for:
# test comp_for
elt = self.handle_test(node[0])
generators = self.get_comp_for(node[1])
return ast.SetComp(elt, generators, *node.start)
# test (',' test)* [',']
elts = []
i = 0
while i < len(node):
elts.append(self.handle_test(node[i]))
i += 2
return ast.Set(elts, *node.start)
    def handle_expr_stmt(self, node):
        """Handle assignments, augmented assignments and bare expressions.

        Grammar::

            expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
                       ('=' (yield_expr|testlist_star_expr))*)
            augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                       '<<=' | '>>=' | '**=' | '//=')
        """
        expr = self.handle_testlist_star_expr(node[0])
        if len(node) == 1:
            # bare expression statement
            return ast.Expr(expr, *node.start)
        if not isinstance(expr, ast.AssignTypes):
            raise self.syntax_error(
                'invalid assign to %s' % type(expr).__name__, node)
        self.loop_mark_ctx(expr, ast.Store)
        if node[1] == syms.augassign:
            # augmented assignment: exactly one target and one value
            op = operator_map[node[1][0].val]
            if node[2] == syms.yield_expr:
                return ast.AugAssign(
                    expr, op, self.handle_yield_expr(node[2]), *node.start)
            return ast.AugAssign(
                expr, op, self.handle_testlist(node[2]), *node.start)
        # chained assignment: every item but the last is a Store target
        targets, i = [expr], 2
        for i in range(2, len(node) - 1, 2):
            if node[i] == syms.yield_expr:
                t = self.handle_yield_expr(node[i])
            else:
                t = self.handle_testlist_star_expr(node[i])
            if not isinstance(t, ast.AssignTypes):
                raise self.syntax_error(
                    'invalid assign to %s' % type(t).__name__, node[i])
            self.loop_mark_ctx(t, ast.Store)
            targets.append(t)
            # NOTE(review): this increment is immediately overwritten by the
            # for-loop and has no effect
            i += 2
        if node[-1] == syms.yield_expr:
            value = self.handle_yield_expr(node[-1])
        else:
            value = self.handle_testlist_star_expr(node[-1])
        self.loop_mark_ctx(value, ast.Load)
        return ast.Assign(targets, value, *node.start)
@classmethod
def loop_mark_ctx(cls, obj, ctx):
obj.ctx = ctx
if isinstance(obj, (ast.List, ast.Tuple)):
for elt in obj.elts:
cls.loop_mark_ctx(elt, ctx)
def handle_testlist_star_expr(self, node):
# testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
exprs = []
for n in node.subs:
if n == syms.test:
exprs.append(self.handle_test(n))
elif n == syms.star_expr:
exprs.append(self.handle_star_expr(n))
if len(exprs) == 1 and node[-1] != token.OP:
return exprs[0]
return ast.Tuple(exprs, ast.Store, *node.start)
def handle_star_expr(self, node):
# star_expr: '*' expr
return ast.Starred(self.handle_expr(node[1]), ast.Store, *node.start)
def handle_del_stmt(self, node):
# del_stmt: 'del' exprlist
expr = self.handle_exprlist(node[1])
self.loop_mark_ctx(expr, ast.Del)
if isinstance(expr, ast.Tuple):
return ast.Delete(expr.elts, *node.start)
return ast.Delete([expr], *node.start)
def handle_pass_stmt(self, node):
# pass_stmt: 'pass'
return ast.Pass(*node.start)
def handle_flow_stmt(self, node):
# flow_stmt: break_stmt | continue_stmt | return_stmt
# | raise_stmt | yield_stmt
# return_stmt: 'return' [testlist]
# break_stmt: 'break'
# continue_stmt: 'continue'
# yield_stmt: yield_expr
# raise_stmt: 'raise' [test ['from' test]]
node = node[0]
if node == syms.return_stmt:
if len(node) == 2:
return ast.Return(self.handle_testlist(node[1]), *node.start)
return ast.Return(None, *node.start)
elif node == syms.break_stmt:
return ast.Break(*node.start)
elif node == syms.continue_stmt:
return ast.Continue(*node.start)
elif node == syms.yield_stmt:
return ast.Expr(self.handle_yield_expr(node[0]), *node.start)
exc = len(node) > 1 and self.handle_test(node[1]) or None
cause = len(node) == 4 and self.handle_test(node[3]) or None
return ast.Raise(exc, cause, *node.start)
def handle_import_stmt(self, node):
if node[0] == syms.import_name:
return self.handle_import_name(node[0])
return self.handle_import_from(node[0])
def handle_import_name(self, node):
# import_name: 'import' dotted_as_names
# dotted_as_names: dotted_as_name (',' dotted_as_name)*
# dotted_as_name: dotted_name ['as' NAME]
alias = []
for n in node[1].filter(syms.dotted_as_name):
name = self.handle_dotted_name(n[0])
if len(n) == 1:
alias.append(ast.alias(name, None))
else:
alias.append(ast.alias(name, n[2].val))
return ast.Import(alias, *node.start)
def handle_import_from(self, node):
# import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
# 'import' ('*' | '(' import_as_names ')' | import_as_names))
level = 0
for i in range(1, len(node)):
if node[i] != token.OP:
break
level += len(node[i].val)
if node[i] == syms.dotted_name:
module = self.handle_dotted_name(node[i])
i += 1
else:
module = None
v = node[i + 1].val
if v == '*':
names = [ast.alias('*', None)]
elif v == '(':
names = self.handle_import_as_names(node[i + 2])
else:
names = self.handle_import_as_names(node[i + 1])
return ast.ImportFrom(module, names, level, *node.start)
def handle_dotted_name(self, node):
# dotted_name: NAME ('.' NAME)*
return '.'.join([n.val for n in node.filter(token.NAME)])
def handle_import_as_names(self, node):
# import_as_name: NAME ['as' NAME]
# import_as_names: import_as_name (',' import_as_name)* [',']
names = []
for n in node.filter(syms.import_as_name):
if len(n) == 1:
names.append(ast.alias(n[0].val, None))
else:
names.append(ast.alias(n[0].val, n[2].val))
return names
    def handle_global_stmt(self, node):
        """Build an ast.Global node listing the declared names."""
        # global_stmt: 'global' NAME (',' NAME)*
        return ast.Global(list(node.filter(token.NAME)), *node.start)
    def handle_nonlocal_stmt(self, node):
        """Build an ast.Nonlocal node listing the declared names."""
        # nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
        return ast.Nonlocal(list(node.filter(token.NAME)), *node.start)
def handle_assert_stmt(self, node):
# assert_stmt: 'assert' test [',' test]
test = self.handle_test(node[1])
if len(node) == 2:
msg = None
else:
msg = self.handle_test(node[3])
return ast.Assert(test, msg, *node.start)
def handle_suite(self, node, get_stmts=False):
# suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
if len(node) == 1:
stmts = self.handle_simple_stmt(node[0])
if get_stmts:
return stmts
return ast.Suite(stmts)
stmts = []
for i in range(2, len(node) - 1):
stmts.extend(self.handle_stmt(node[i]))
if get_stmts:
return stmts
return ast.Suite(stmts)
    def handle_if_stmt(self, node):
        """Build an ast.If chain, nesting each elif inside its predecessor's orelse."""
        # if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
        test = self.handle_test(node[1])
        body = self.handle_suite(node[3], get_stmts=True)
        ifexpr = ast.If(test, body, [], *node.start)
        # Each 'elif' becomes a nested ast.If appended to the previous orelse.
        cur, i = ifexpr, 4
        while i < len(node) and node[i].val == 'elif':
            test = self.handle_test(node[i + 1])
            body = self.handle_suite(node[i + 3], get_stmts=True)
            expr = ast.If(test, body, [], *node[i].start)
            cur.orelse.append(expr)
            cur = expr
            i += 4  # advance past 'elif' test ':' suite
        if i < len(node):
            # trailing 'else' clause attaches to the innermost If
            cur.orelse = self.handle_suite(node[-1], get_stmts=True)
        return ifexpr
def handle_while_stmt(self, node):
# while_stmt: 'while' test ':' suite ['else' ':' suite]
test = self.handle_test(node[1])
body = self.handle_suite(node[3], get_stmts=True)
if len(node) == 4:
orelse = []
else:
orelse = self.handle_suite(node[6], get_stmts=True)
return ast.While(test, body, orelse, *node.start)
    def handle_for_stmt(self, node):
        """Build an ast.For node, validating and marking the loop target."""
        # for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
        target = self.handle_exprlist(node[1])
        if not isinstance(target, ast.AssignTypes):
            raise self.syntax_error(
                'invalid assign to %s' % type(target).__name__, node[1])
        # the loop target is assigned each iteration, so mark it Store
        self.loop_mark_ctx(target, ast.Store)
        iterator = self.handle_testlist(node[3])
        body = self.handle_suite(node[5], get_stmts=True)
        if len(node) == 6:
            orelse = []
        else:
            orelse = self.handle_suite(node[8], get_stmts=True)
        return ast.For(target, iterator, body, orelse, *node.start)
    def handle_try_stmt(self, node):
        """Build an ast.Try node from try/except/else/finally clauses."""
        # try_stmt: ('try' ':' suite
        #            ((except_clause ':' suite)+
        #             ['else' ':' suite]
        #             ['finally' ':' suite] |
        #             'finally' ':' suite))
        # except_clause: 'except' [test ['as' NAME]]
        body = self.handle_suite(node[2], get_stmts=True)
        handlers, i = [], 3
        while i < len(node) and node[i] == syms.except_clause:
            expnode = node[i]
            # optional exception type and optional 'as NAME' binding
            exptype = len(expnode) > 1 and self.handle_test(expnode[1]) or None
            expname = len(expnode) > 2 and expnode[3].val or None
            expbody = self.handle_suite(node[i + 2], get_stmts=True)
            handlers.append(
                ast.ExceptHandler(exptype, expname, expbody, *expnode.start))
            i += 3  # advance past except_clause ':' suite
        orelse = []
        if i < len(node) and node[i].val == 'else':
            orelse = self.handle_suite(node[i + 2], get_stmts=True)
            i += 3
        finalbody = []
        if i < len(node) and node[i].val == 'finally':
            finalbody = self.handle_suite(node[i + 2], get_stmts=True)
        return ast.Try(body, handlers, orelse, finalbody, *node.start)
    def handle_with_stmt(self, node):
        """Build an ast.With node with one withitem per context manager."""
        # with_stmt: 'with' with_item (',' with_item)* ':' suite
        # with_item: test ['as' expr]
        items, i = [], 1
        while i < len(node) and node[i] == syms.with_item:
            wnode = node[i]
            item = ast.withitem(self.handle_test(wnode[0]), None)
            if len(wnode) == 3:
                # 'as' target expression
                item.optional_vars = self.handle_test(wnode[2])
            items.append(item)
            i += 2  # skip the ',' separator between with_items
        body = self.handle_suite(node[-1], get_stmts=True)
        return ast.With(items, body, *node.start)
    def handle_funcdef(self, node):
        """Build an ast.FunctionDef (decorators are attached by the caller)."""
        # funcdef: 'def' NAME parameters ['->' test] ':' suite
        # parameters: '(' [typedargslist] ')'
        name = node[1].val
        if len(node[2]) == 2:
            # empty parameter list: just '(' ')'
            params = ast.arguments(args=[], vararg=None, varargannotation=None,
                kwonlyargs=[], kwarg=None, kwargannotation=None,
                defaults=[], kw_defaults=[])
        else:
            params = self.handle_typedargslist(node[2][1])
        if node[3].val == ':':
            # no '->' return annotation present
            returns = None
        else:
            returns = self.handle_test(node[4])
        body = self.handle_suite(node[-1], get_stmts=True)
        return ast.FunctionDef(name, params, body, [], returns, *node.start)
    def handle_classdef(self, node):
        """Build an ast.ClassDef (decorators are attached by the caller)."""
        # classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
        name = node[1].val
        if len(node) == 7:
            # 'class' NAME '(' arglist ')' ':' suite
            bases, keywords, starargs, kwargs = self.get_arglist(node[3])
        else:
            bases, keywords, starargs, kwargs = [], [], None, None
        body = self.handle_suite(node[-1], get_stmts=True)
        return ast.ClassDef(
            name, bases, keywords, starargs, kwargs, body, [], *node.start)
def handle_decorated(self, node):
# decorated: decorators (classdef | funcdef)
# decorators: decorator+
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# dotted_name: NAME ('.' NAME)*
if node[1] == syms.funcdef:
funccls = self.handle_funcdef(node[1])
else:
funccls = self.handle_classdef(node[1])
decs, i = [], 0
for n in node[0]:
name = self.get_attribute(n[1])
if n[2].val == '(':
if len(n) == 6:
args, keywords, starargs, kwargs = self.get_arglist(n[3])
else:
args, keywords, starargs, kwargs = [], [], None, None
decs.append(ast.Call(
name, args, keywords, starargs, kwargs, *n[1].start))
else:
decs.append(name)
funccls.decorator_list = decs
return funccls
    def get_attribute(self, node):
        """Turn a dotted_name parse node into nested ast.Attribute nodes."""
        # dotted_name: NAME ('.' NAME)*
        first = ast.Name(node[0].val, ast.Load, *node.start)
        if len(node) == 1:
            return first
        # fold 'a.b.c' left to right: Attribute(Attribute(Name(a), b), c)
        attr = ast.Attribute(first, node[2].val, ast.Load, *node.start)
        i = 4
        while i < len(node):
            attr = ast.Attribute(attr, node[i].val, ast.Load, *node.start)
            i += 2
        return attr | lodevil/cpy | cpy/parser/ast_builder.py | Python | mit | 38,253 |
| lodevil/cpy | cpy/parser/ast_builder.py | Python | mit | 38,253 |
# -*- coding: utf-8 -*-
class BaseAI(object):
    """Base class for mahjong AI implementations."""
    # the player this AI acts for (set in __init__)
    player = None
    # the game table; left unset by the base class
    table = None
    def __init__(self, player):
        self.player = player
    def discard_tile(self):
        """Pick a tile to discard; subclasses override this. Base returns None."""
        pass
| huangenyan/Lattish | project/mahjong/ai/base.py | Python | mit | 187 |
# Benchmark: combine three rasters with one map-algebra expression via arcpy.
import time
import arcpy
from arcpy import env
from arcpy.sa import *
# Set environment settings
env.workspace = "" # set your workspace
arcpy.env.overwriteOutput = True
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
# NOTE(review): time.clock() was removed in Python 3.8, and 'tic' is never
# read in this chunk -- presumably an elapsed-time print follows; confirm
# against the full script.
tic = time.clock()
a_file = "random_a.tif"
b_file = "random_b.tif"
c_file = "random_c.tif"
out_file = "output.tif"
a = Raster(a_file)
b = Raster(b_file)
c = Raster(c_file)
# Raster overloads arithmetic, so this one line expresses the whole map algebra.
out = 3 * a + b * c
out.save(out_file) | ahhz/raster | benchmarks/benchmark_3_layers_arcpy.py | Python | mit | 478 |
#!/usr/bin/python
# Verify that Python's hashlib.md5 matches a digest produced by Perl's
# Digest::MD5 for the same input string.  (Python 2 script.)
import hashlib
# perl
## http://stackoverflow.com/questions/9991757/sha256-digest-in-perl
## (the linked question is about SHA-256, but the snippet below uses MD5,
##  matching the Python code that follows)
#use Digest::MD5 qw(md5_hex);
#print md5_hex('swaranga@gmail.com'), "\n";
perl_result = "cbc41284e23c8c7ed98f589b6d6ebfd6"
md5 = hashlib.md5()
md5.update('swaranga@gmail.com')
hex1 = md5.hexdigest()
if hex1 == perl_result:
    print "ok"
else:
    print "FAIL perl_result = %s" % str(perl_result)
    print "FAIL hex1 = %s" % str(hex1)
| jtraver/dev | python/hashlib/md5.py | Python | mit | 457 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Rename the card_game tables from plural to singular model names.

    South cannot express a rename directly, so ``forwards`` drops the old
    plural tables (unique_cards/cards/versions/unique_versions) and creates
    the singular ones (Unique_Card/Card/Version/Unique_Version); any rows in
    the dropped tables are lost.  ``backwards`` mirrors this in reverse.
    """
    def forwards(self, orm):
        # Deleting model 'unique_cards'
        db.delete_table(u'card_game_unique_cards')
        # Deleting model 'cards'
        db.delete_table(u'card_game_cards')
        # Deleting model 'versions'
        db.delete_table(u'card_game_versions')
        # Deleting model 'unique_versions'
        db.delete_table(u'card_game_unique_versions')
        # Adding model 'Unique_Card'
        db.create_table(u'card_game_unique_card', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Card'])),
        ))
        db.send_create_signal(u'card_game', ['Unique_Card'])
        # Adding model 'Card'
        db.create_table(u'card_game_card', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('cost', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('art', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('text', self.gf('django.db.models.fields.TextField')()),
            ('power', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('toughness', self.gf('django.db.models.fields.IntegerField')(default=1)),
        ))
        db.send_create_signal(u'card_game', ['Card'])
        # Adding model 'Version'
        db.create_table(u'card_game_version', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('version_number', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Unique_Version'])),
            ('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Card'])),
        ))
        db.send_create_signal(u'card_game', ['Version'])
        # Adding model 'Unique_Version'
        db.create_table(u'card_game_unique_version', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'card_game', ['Unique_Version'])
    def backwards(self, orm):
        # Adding model 'unique_cards'
        db.create_table(u'card_game_unique_cards', (
            ('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.cards'])),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal(u'card_game', ['unique_cards'])
        # Adding model 'cards'
        db.create_table(u'card_game_cards', (
            ('toughness', self.gf('django.db.models.fields.IntegerField')(default=1)),
            ('art', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('power', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('text', self.gf('django.db.models.fields.TextField')()),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cost', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'card_game', ['cards'])
        # Adding model 'versions'
        db.create_table(u'card_game_versions', (
            ('version_number', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.unique_versions'])),
            ('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.cards'])),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal(u'card_game', ['versions'])
        # Adding model 'unique_versions'
        db.create_table(u'card_game_unique_versions', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128, unique=True)),
            ('creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'card_game', ['unique_versions'])
        # Deleting model 'Unique_Card'
        db.delete_table(u'card_game_unique_card')
        # Deleting model 'Card'
        db.delete_table(u'card_game_card')
        # Deleting model 'Version'
        db.delete_table(u'card_game_version')
        # Deleting model 'Unique_Version'
        db.delete_table(u'card_game_unique_version')
    # Frozen ORM state used by South when running this migration.
    models = {
        u'card_game.card': {
            'Meta': {'object_name': 'Card'},
            'art': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'cost': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'power': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'toughness': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'card_game.unique_card': {
            'Meta': {'object_name': 'Unique_Card'},
            'card_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Card']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'card_game.unique_version': {
            'Meta': {'object_name': 'Unique_Version'},
            'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        u'card_game.version': {
            'Meta': {'object_name': 'Version'},
            'card_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Card']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'version_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Unique_Version']"})
        }
    }
    complete_apps = ['card_game'] | AndrewRook/game_designer | card_game/migrations/0002_auto__del_unique_cards__del_cards__del_versions__del_unique_versions__.py | Python | mit | 7,131 |
import json
def load(ctx):
    """Read and return the JSON document stored at ctx.obj["data_location"]."""
    with open(ctx.obj["data_location"], "r") as handle:
        return json.load(handle)
def save(ctx, map_obj):
    """Serialize map_obj as indented JSON to ctx.obj["data_location"]."""
    with open(ctx.obj["data_location"], "w") as handle:
        json.dump(map_obj, handle, indent=4)
| jghibiki/Cursed | utils.py | Python | mit | 223 |
import logging
import pygame
from .. import Collage
class SimpleResize(Collage):
    """Example collage plugin: scales a single wallpaper to fill the output."""
    name = 'simple resize'
    def __init__(self, config):
        super(SimpleResize, self).__init__(config)
    def generate(self, size):
        """Render one wallpaper resized onto a surface of the requested size."""
        wallpapers = self._get_wallpapers()
        logging.debug('Generating...')
        canvas = pygame.Surface(size)
        offset, wallpaper = self._resize_wallpaper(wallpapers[0], size)
        canvas.blit(wallpaper, (0, 0), pygame.Rect(offset, size))
        logging.debug('Generation complete')
        return canvas
    def _get_wallpapers(self):
        # pull the next wallpaper (list) from the configured source
        return self.wallpaper_source.pop()
| loktacar/wallpapermaker | plugins/simple_resize/simple_resize.py | Python | mit | 734 |
import warnings
from collections import OrderedDict
from honeybadger.plugins import default_plugin_manager
from .contrib.test_django import DjangoMiddlewareTestCase
from honeybadger.middleware import DjangoHoneybadgerMiddleware
__all__ = ['MiddlewareTestCase']
class MiddlewareTestCase(DjangoMiddlewareTestCase):
    def test_middleware_import_warning(self):
        """Constructing the middleware directly should emit a FutureWarning."""
        default_plugin_manager._registered = OrderedDict()
        with warnings.catch_warnings(record=True) as caught:
            DjangoHoneybadgerMiddleware()
        assert len(caught) == 1
        assert issubclass(caught[-1].category, FutureWarning)
        assert "moved" in str(caught[-1].message)
| honeybadger-io/honeybadger-python | honeybadger/tests/test_middleware.py | Python | mit | 674 |
def get_all(tordb):
    """Return a cursor over every document in the torrent collection."""
    return tordb.find()
def delete(tordb, obj_id):
    """Remove the document identified by obj_id from the collection.

    NOTE(review): ``remove`` is handed a one-element list -- confirm the
    backend expects a list rather than a bare id or spec dict.
    """
    tordb.remove([obj_id])
def insert(tordb, obj):
    """Insert obj into the collection and return the backend's result."""
    return tordb.insert(obj)
def update_full(tordb, id, obj):
    """Apply a $set update of obj's fields to the document with _id == id."""
    tordb.update({'_id': id}, {'$set': obj})
| noahgoldman/torwiz | torrents/torrents/database.py | Python | mit | 232 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Allow Discussion.original_post to be NULL (adds null=True to the field)."""
    dependencies = [
        ('discussions', '0004_auto_20150430_1641'),
    ]
    operations = [
        migrations.AlterField(
            model_name='discussion',
            name='original_post',
            field=models.OneToOneField(null=True, to='discussions.Post', related_name='OP'),
        ),
    ]
| ZackYovel/studybuddy | server/studybuddy/discussions/migrations/0005_auto_20150430_1645.py | Python | mit | 459 |
from .plot import * | jacobdein/nacoustik | nacoustik/plot/__init__.py | Python | mit | 19 |
"""simpledrf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route the Django admin site and delegate /employee/ URLs to the employee app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^employee/', include('employee.urls')),
]
| Calzzetta/Simple-Django-REST-Framework-Demo | simpledrf/urls.py | Python | mit | 825 |
import posixpath
class UrlPackage:
    """ Represents a package specified as a Url """
    def __init__(self, url):
        """ Initialize with the url.

        A bare 'user/repo' string (no ':' anywhere, so no URL scheme) is
        expanded into a pip-installable GitHub git URL; anything containing
        ':' is assumed to already be a complete URL and stored unchanged.
        """
        if ':' in url:
            self.url = url
        else:
            self.url = posixpath.join('git+git://github.com', url)
    @property
    def installAs(self):
        """ Return the string to use to Install the package via pip """
        return self.url
    def forRequirements(self, versions):
        """ Return the text to use for adding to the Requirements file.

        ``versions`` is accepted but unused here -- presumably kept for
        interface parity with other package types; confirm against siblings.
        """
        return self.url | cloew/tidypip | tidypip/packages/url_package.py | Python | mit | 728 |
import sublime, sublime_plugin, requests
from xml.etree import ElementTree as ET
class WolframAlphaLookupCommand(sublime_plugin.WindowCommand):
    """Query Wolfram|Alpha with the selection (or current line) and let the
    user pick an answer pod to insert into the view."""
    def run(self):
        settings = sublime.load_settings("Preferences.sublime-settings")
        if settings.has("wolfram_api_key"):
            API_KEY = settings.get("wolfram_api_key")
            # Only the first selection region is processed: every path
            # through the loop body ends in a ``break``.
            for region in self.window.active_view().sel():
                if not region.empty():
                    query = self.window.active_view().substr(region)
                else:
                    # empty selection: fall back to the whole current line
                    query = self.window.active_view().substr(self.window.active_view().line(region))
                query = query.strip()
                r = requests.get("http://api.wolframalpha.com/v2/query", params={
                    "input": query,
                    "appid": API_KEY
                })
                root = ET.fromstring(r.text)
                if root.get('success') == 'true':
                    items = list()
                    # collect [title, plaintext] pairs from every answer pod
                    for pod in root.iter('pod'):
                        title = pod.attrib.get('title')
                        plaintext = pod.find('./subpod/plaintext').text
                        if title and plaintext:
                            items.append([title, plaintext])
                    def on_select(index):
                        # index is -1 when the quick panel is cancelled
                        if index > -1:
                            # NOTE(review): these two print() calls look like
                            # leftover debug output -- confirm and remove.
                            print(items[index])
                            print(region)
                            self.window.active_view().run_command("insert_result", {"data": items[index][1]})
                    self.window.show_quick_panel(items, on_select)
                else:
                    sublime.error_message("Wolfram|Alpha could not understand your query!")
                break
        else:
            sublime.error_message("Please add a \"wolfram_api_key\" to the settings!")
class InsertResultCommand(sublime_plugin.TextCommand):
    """Insert ``data`` into the view: after the first non-empty selection, or
    on a new line below the caret's line when the selection is empty."""
    def run(self, edit, data):
        for region in self.view.sel():
            if region.empty():
                # empty selection: append below the current line, trimming a
                # single trailing newline from data to avoid a blank line
                # (note: this branch does not break, so every empty caret
                # receives the insertion)
                line = self.view.line(region)
                self.view.insert(edit, line.end(), '\n' + (data[:-1] if data[-1] == '\n' else data))
            else:
                self.view.insert(edit, region.end(), data)
                break
| PapaCharlie/WolframAlphaLookup | WolframAlphaLookup.py | Python | mit | 2,331 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-4-19 上午11:02
# @Author : Tom.Lee
# @Description :
# @File : helper_os.py
# @Product : PyCharm
import commands
import os
import sys
def shell():
    """Demonstrate three ways of running shell commands from Python 2."""
    command_ls = 'ls -al /opt'
    command_docker = 'docker ps -a'
    # via os.system(): returns the command's exit status, output goes to stdout
    ros = os.system(command_ls)
    print '\n\nos.system() : ', ros
    # via os.popen(): returns a file-like object wrapping the command's stdout
    output = os.popen(command_docker)
    result = output.read()
    print '\n\nos.popen() : ', result
    # via the commands module (Python 2 only): returns (status, output)
    (status, output) = commands.getstatusoutput(command_docker)
    print '\n\ncommands : ', status, output
def deep_look_dir(dir_path, deep=1, console_full_path=False):
    """
    deep_look_dir(dir_name, console_full_path=False)
    Recursively print every file under a directory as an ASCII tree.
    :param dir_path: root directory, e.g. os.path.dirname(__file__)
    :param deep: current recursion depth (1 for the root call)
    :param console_full_path: print absolute paths instead of bare names
    :return:
    """
    if deep == 1:
        print dir_path
    files = os.listdir(dir_path)
    # tree-drawing prefix: deeper levels are indented under a '|' rail
    split_symbol = '|_' * deep if deep == 1 else '|' + ' ' * (deep - 1) + '|_'
    for f in files:
        f_path = os.path.join(dir_path, f)
        console_name = f_path if console_full_path else f
        if not os.path.isfile(f_path):
            # directories print with a trailing '/:' and then recurse
            print "{sp} {dir_path}/: ".format(
                sp=split_symbol,
                dir_path=console_name)
            num = deep + 1
            deep_look_dir(f_path, num, console_full_path)
        else:
            print split_symbol, console_name
def sys_path():
    """Demonstrate common os.path manipulations and extending sys.path.

    The printed labels are runtime output (in Chinese) and are left as-is.
    """
    print '\n\n'
    print '当前文件路径: '
    print os.path.abspath(__file__)
    print '当前文件所在目录: '
    print os.path.dirname(__file__)
    print '当前文件所在目录的上一级目录: '
    print os.path.dirname(os.path.dirname(__file__))
    print '当前文件所在目录: '
    root_path = os.path.dirname(os.path.abspath(__file__))
    print root_path
    print '当前文件所在目录同一级的bin目录: '
    print os.path.abspath(os.path.join(root_path, '..', 'bin'))
    print '目录拼接: '
    print os.path.join(os.path.dirname(__file__), "templates")
    print os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")
    print '添加文件路径到系统环境变量: '
    # guard against appending a duplicate sys.path entry
    if root_path not in sys.path:
        sys.path.append(root_path)
    print sys.path
# Demo entry point: prints the results of assorted os.* introspection calls
# wrapped in ANSI colour codes.
# NOTE(review): the escape sequences look malformed ('\033[1;31;m' has a stray
# ';' before 'm'; '\033[1;31m' is the usual form), and os.geteuid() is printed
# twice -- confirm whether either is intentional.
if '__main__' == __name__:
    """TEST MAIN"""
    # shell()
    # sys_path()
    # print os.path.abspath(os.path.join('/opt/tom/', '..', 'bin'))  # /opt/bin
    # print os.path.dirname('/opt/tom')
    print '\033[1;31;m os.listdir() ==> \033[0m', os.listdir(os.getcwd())
    print'\033[1;31;m os.getcwd() ==> \033[0m', os.getcwd()
    print '\033[1;31;m os.getcwdu() ==> \033[0m', os.getcwdu()
    print '\033[1;31;m os.getegid() ==> \033[0m', os.getegid()
    print '\033[1;31;m os.getenv() ==> \033[0m', os.getenv('TOM_PATH', '/home/tom')
    print '\033[1;31;m os.geteuid() ==> \033[0m', os.geteuid()
    print '\033[1;31;m os.getgid() ==> \033[0m', os.getgid()
    print '\033[1;31;m os.getgroups() ==> \033[0m', os.getgroups()
    print '\033[1;31;m os.getppid() ==> \033[0m', os.getppid()
    print '\033[1;31;m os.getpgrp() ==> \033[0m', os.getpgrp()
    print '\033[1;31;m os.getresgid() ==> \033[0m', os.getresgid()
    print '\033[1;31;m os.getloadavg() ==> \033[0m', os.getloadavg()
    print '\033[1;31;m os.geteuid() ==> \033[0m', os.geteuid()
| amlyj/pythonStudy | 2.7/standard_library/study_os.py | Python | mit | 3,462 |
#!/usr/bin/env bash
# Build self-contained S5 slides from trial.md with pandoc.
# (Fixed: the first line lacked the '#!' shebang marker and named Python,
# but the body is a shell command, so this is a shell script.)
pandoc -t s5 --self-contained trial.md -o index.html
| Lcmm/SESE | tools/mkslides.py | Python | mit | 75 |
# Copyright (C) 2015 by Per Unneberg
class NotInstalledError(Exception):
    """Error thrown if program/command/application cannot be found in path

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # forward msg to Exception so str(e) and logging show the message
        # (previously Exception.__init__ was never called and str(e) was empty)
        super(NotInstalledError, self).__init__(msg)
        self.msg = msg
        self.code = code
class SamplesException(Exception):
    """Error thrown if samples missing or wrong number.

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # forward msg to Exception so str(e) and logging show the message
        # (previously Exception.__init__ was never called and str(e) was empty)
        super(SamplesException, self).__init__(msg)
        self.msg = msg
        self.code = code
class OutputFilesException(Exception):
    """Error thrown if outputfiles missing or wrong number.

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # forward msg to Exception so str(e) and logging show the message
        # (previously Exception.__init__ was never called and str(e) was empty)
        super(OutputFilesException, self).__init__(msg)
        self.msg = msg
        self.code = code
| percyfal/snakemakelib | snakemakelib/exceptions.py | Python | mit | 965 |
"""
Module:
dr2
"""
| barentsen/iphas-dr2 | dr2/__init__.py | Python | mit | 26 |
"""
Generate possible queries from Gates Found grant database.
There are 4 filters and >11K possibilities, Bill returns a max of 1000 results per query combo.
So the hope is that by using all potential queries, we will get everything. Otherwise, their
system is broken too!
"""
import json
import requests
from itertools import product
from gates_constants import PAYLOAD, HEADERS, URL
def get_inital_parameters():
    """Request an unfiltered set of data from grant database to get a list of all filters"""
    # (the 'inital' typo in the name is kept because callers use it)
    # Request
    r = requests.post(URL, data=json.dumps(PAYLOAD), headers=HEADERS)
    r.raise_for_status()
    # Return raw JSON
    try:
        return r.json()
    # NOTE(review): bare 'except' swallows everything, including
    # KeyboardInterrupt -- consider catching ValueError instead.
    except:
        raise Exception("Json response empty in intial request!!!")
def extract_facets(raw_json_response):
    """Map each facet's 'field' name to its list of 'items' dictionaries."""
    return {facet["field"]: facet["items"]
            for facet in raw_json_response["facets"]}
def create_field_dicts(facets_dict, minimum_count):
    """
    Return one dictionary with an entry per filter category, each mapping to
    the list of all values whose result count is at least minimum_count.

    Raising minimum_count shrinks the number of unique queries generated
    downstream (e.g. ~19k queries at 1 vs ~4k at 150 in quick testing).
    """
    master_dict = {}
    for category, entries in facets_dict.items():
        # de-duplicate names via a set, then expose them as a list
        qualifying = {entry["name"] for entry in entries
                      if entry["count"] >= minimum_count}
        master_dict[category] = list(qualifying)
    return master_dict
def generate_unique_queries(fields_dict):
    """
    Return a list of strings, where each string is a unique possible query
    for the grant data set: every single filter on its own plus every
    combination of filters across categories, joined with ' and '.

    Generalized: works for any number of filter categories (the original
    hard-coded exactly four via ``values()[0] .. values()[3]``, which also
    broke on Python 3 where dict views are not indexable).
    """
    # Reference: "fieldQueries":
    # (@gfocategories==\"US Program\")
    # (@gfotopics==\"College-Ready\")
    # (@gfoyear==\"2009 and earlier\")
    # (@gforegions==\"North America\")
    master_query_set = set()
    template_query = "(@%s==\"%s\")"
    # Convert each plain string into its query-string equivalent.
    query_string_dict = {}
    for category, fields in fields_dict.items():
        query_string_dict[category] = []
        for f in fields:
            query_string = template_query % (category, f)
            query_string_dict[category].append(query_string)
            # Each query is also valid on its own (no combination).
            master_query_set.add(query_string)
        # An empty entry lets itertools.product emit combinations that
        # omit this category entirely.
        query_string_dict[category].append("")
    # Cartesian product across all categories at once.
    query_strings = []
    for combo in product(*query_string_dict.values()):
        # Drop the empty placeholders, then skip fully-empty combinations.
        parts = tuple(part for part in combo if part != "")
        if parts:
            query_strings.append(" and ".join(parts))
    master_query_set.update(query_strings)
    return list(master_query_set)
def save_unique_queries(minimum_count=150):
    """Save/update unique queries on disk (feeds the scraper's query list)."""
    print "Retrieving unique query strings, minimum filter count = %d" % minimum_count
    raw_json = get_inital_parameters()
    facets_dict = extract_facets(raw_json)
    fields_dict = create_field_dicts(facets_dict, minimum_count)
    unique_queries = generate_unique_queries(fields_dict)
    print "Returning a set of %d unique queries" % len(unique_queries)
    # persist for later consumption; note this function returns None
    with open("unique_queries.json", 'w') as outfile:
        json.dump(unique_queries, outfile)
# Script entry point.
# NOTE(review): save_unique_queries() has no return statement, so ``queries``
# is always None -- confirm whether a return value was intended.
if __name__ == "__main__":
    queries = save_unique_queries()
| dylanjbarth/gates-found-scraper | generate_queries.py | Python | mit | 4,344 |
import os
from lxml import etree
# write_rss_xml writes name and date data for podcast RSS feeds to XML files
# contained in the relative path ./feeds. It is currently assumed that each
# podcast will have its data stored in a separate file.
def write_rss_xml(feed, feed_url, latest_title, latest_date):
    """Persist one podcast's name/url/latest-episode data as XML.

    The data is written to feeds/<feed>_rss.xml (relative path), one file per
    podcast; the file is fully rewritten on every call.
    """
    file_name = feed + "_rss.xml"
    # open for binary writing, since etree.tostring yields encoded bytes
    with open(os.path.join("feeds", file_name), "wb") as out:
        root = etree.Element("podcast")
        # four children: podcast name, feed url, latest episode title/date
        etree.SubElement(root, "name").text = feed
        etree.SubElement(root, "url").text = feed_url
        etree.SubElement(root, "episode").text = latest_title
        etree.SubElement(root, "date").text = latest_date
        xml_bytes = etree.tostring(root, xml_declaration=True, pretty_print=True)
        out.write(xml_bytes)
| digwiz/engrss | write_rss_xml.py | Python | mit | 1,634 |
from python_vlookup import *
| cscanlin/Super-Simple-VLOOKUP-in-Python | python_vlookup/__init__.py | Python | mit | 29 |
# -*- coding: utf-8 -*-
"""
Amavis management frontend.
Provides:
* SQL quarantine management
* Per-domain settings
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy
from modoboa.admin.models import Domain
from modoboa.core.extensions import ModoExtension, exts_pool
from modoboa.parameters import tools as param_tools
from . import __version__, forms
from .lib import create_user_and_policy, create_user_and_use_policy
class Amavis(ModoExtension):
    """The Amavis extension: SQL quarantine frontend and per-domain settings."""
    name = "modoboa_amavis"
    label = ugettext_lazy("Amavis frontend")
    version = __version__
    description = ugettext_lazy("Simple amavis management frontend")
    url = "quarantine"
    available_for_topredirection = True
    def load(self):
        """Register the global parameters form and the per-user settings form."""
        param_tools.registry.add("global", forms.ParametersForm, "Amavis")
        param_tools.registry.add(
            "user", forms.UserSettings, ugettext_lazy("Quarantine"))
    def load_initial_data(self):
        """Create records for existing domains and co."""
        for dom in Domain.objects.all():
            # one amavis user/policy per domain, keyed by '@<domain>'
            policy = create_user_and_policy("@{0}".format(dom.name))
            for domalias in dom.domainalias_set.all():
                # domain aliases reuse the parent domain's policy
                domalias_pattern = "@{0}".format(domalias.name)
                create_user_and_use_policy(domalias_pattern, policy)
exts_pool.register_extension(Amavis)
| modoboa/modoboa-amavis | modoboa_amavis/modo_extension.py | Python | mit | 1,395 |
import os
from loguru import logger
from requests import Session
from requests.exceptions import RequestException
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
logger = logger.bind(name='qbittorrent')
class OutputQBitTorrent:
    """
    Example:

      qbittorrent:
        username: <USERNAME> (default: (none))
        password: <PASSWORD> (default: (none))
        host: <HOSTNAME> (default: localhost)
        port: <PORT> (default: 8080)
        use_ssl: <SSL> (default: False)
        verify_cert: <VERIFY> (default: True)
        path: <OUTPUT_DIR> (default: (none))
        label: <LABEL> (default: (none))
        tags: <TAGS> (default: (none))
        maxupspeed: <torrent upload speed limit> (default: 0)
        maxdownspeed: <torrent download speed limit> (default: 0)
        add_paused: <ADD_PAUSED> (default: False)
    """
    # Config is either a bare boolean or a mapping of connection and
    # per-torrent options (see the docstring example above).
    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'use_ssl': {'type': 'boolean'},
                    'verify_cert': {'type': 'boolean'},
                    'path': {'type': 'string'},
                    'label': {'type': 'string'},
                    'tags': {'type': 'array', 'items': {'type': 'string'}},
                    'maxupspeed': {'type': 'integer'},
                    'maxdownspeed': {'type': 'integer'},
                    'fail_html': {'type': 'boolean'},
                    'add_paused': {'type': 'boolean'},
                    'skip_check': {'type': 'boolean'},
                },
                'additionalProperties': False,
            },
        ]
    }
    def __init__(self):
        """Set up an HTTP session and unresolved connection state."""
        super().__init__()
        self.session = Session()
        # API endpoint paths; filled in by check_api_version() once the
        # qBittorrent Web API generation (v2 vs legacy) has been detected.
        self.api_url_login = None
        self.api_url_upload = None
        self.api_url_download = None
        self.api_url_info = None
        self.url = None
        self.connected = False
    def _request(self, method, url, msg_on_fail=None, **kwargs):
        """Send an HTTP request; return True only if the body is exactly 'Ok.'.

        Any other response body, or a requests-level error, logs msg_on_fail
        (or a generic message) and returns False.
        """
        try:
            response = self.session.request(method, url, **kwargs)
            # the qBittorrent endpoints used here answer a literal 'Ok.' on success
            if response.text == "Ok.":
                return True
            msg = msg_on_fail if msg_on_fail else f'Failure. URL: {url}, data: {kwargs}'
        except RequestException as e:
            msg = str(e)
        logger.error('Error when trying to send request to qBittorrent: {}', msg)
        return False
def check_api_version(self, msg_on_fail, verify=True):
try:
url = self.url + "/api/v2/app/webapiVersion"
response = self.session.request('get', url, verify=verify)
if response.status_code != 404:
self.api_url_login = '/api/v2/auth/login'
self.api_url_upload = '/api/v2/torrents/add'
self.api_url_download = '/api/v2/torrents/add'
self.api_url_info = '/api/v2/torrents/info'
return response
url = self.url + "/version/api"
response = self.session.request('get', url, verify=verify)
if response.status_code != 404:
self.api_url_login = '/login'
self.api_url_upload = '/command/upload'
self.api_url_download = '/command/download'
self.api_url_info = '/query/torrents'
return response
msg = 'Failure. URL: {}'.format(url) if not msg_on_fail else msg_on_fail
except RequestException as e:
msg = str(e)
raise plugin.PluginError(
'Error when trying to send request to qBittorrent: {}'.format(msg)
)
def connect(self, config):
"""
Connect to qBittorrent Web UI. Username and password not necessary
if 'Bypass authentication for localhost' is checked and host is
'localhost'.
"""
self.url = '{}://{}:{}'.format(
'https' if config['use_ssl'] else 'http', config['host'], config['port']
)
self.check_api_version('Check API version failed.', verify=config['verify_cert'])
if config.get('username') and config.get('password'):
data = {'username': config['username'], 'password': config['password']}
if not self._request(
'post',
self.url + self.api_url_login,
data=data,
msg_on_fail='Authentication failed.',
verify=config['verify_cert'],
):
raise plugin.PluginError('Not connected.')
logger.debug('Successfully connected to qBittorrent')
self.connected = True
def check_torrent_exists(self, hash_torrent, verify_cert):
if not self.connected:
raise plugin.PluginError('Not connected.')
if not isinstance(hash_torrent, str):
logger.error('Error getting torrent info, invalid hash {}', hash_torrent)
return False
hash_torrent = hash_torrent.lower()
logger.debug(f'Checking if torrent with hash {repr(hash)} already in session.')
url = f'{self.url}{self.api_url_info}'
params = {'hashes': hash_torrent}
try:
respose = self.session.request(
'get',
url,
params=params,
verify=verify_cert,
)
except RequestException as e:
logger.error('Error getting torrent info, request to hash {} failed', hash_torrent)
return False
if respose.status_code != 200:
logger.error(
'Error getting torrent info, hash {} search returned',
hash_torrent,
respose.status_code,
)
return False
check_file = respose.json()
if isinstance(check_file, list) and check_file:
logger.warning('File with hash {} already in qbittorrent', hash_torrent)
return True
return False
def add_torrent_file(self, entry, data, verify_cert):
file_path = entry['file']
if not self.connected:
raise plugin.PluginError('Not connected.')
multipart_data = {k: (None, v) for k, v in data.items()}
with open(file_path, 'rb') as f:
multipart_data['torrents'] = f
if not self._request(
'post',
self.url + self.api_url_upload,
msg_on_fail='Failed to add file to qBittorrent',
files=multipart_data,
verify=verify_cert,
):
entry.fail(f'Error adding file `{file_path}` to qBittorrent')
return
logger.debug('Added torrent file {} to qBittorrent', file_path)
def add_torrent_url(self, entry, data, verify_cert):
url = entry['url']
if not self.connected:
raise plugin.PluginError('Not connected.')
data['urls'] = url
multipart_data = {k: (None, v) for k, v in data.items()}
if not self._request(
'post',
self.url + self.api_url_download,
msg_on_fail=f'Failed to add url to qBittorrent: {url}',
files=multipart_data,
verify=verify_cert,
):
entry.fail(f'Error adding url `{url}` to qBittorrent')
return
logger.debug('Added url {} to qBittorrent', url)
@staticmethod
def prepare_config(config):
if isinstance(config, bool):
config = {'enabled': config}
config.setdefault('enabled', True)
config.setdefault('host', 'localhost')
config.setdefault('port', 8080)
config.setdefault('use_ssl', False)
config.setdefault('verify_cert', True)
config.setdefault('label', '')
config.setdefault('tags', [])
config.setdefault('maxupspeed', 0)
config.setdefault('maxdownspeed', 0)
config.setdefault('fail_html', True)
return config
def add_entries(self, task, config):
for entry in task.accepted:
form_data = {}
try:
save_path = entry.render(entry.get('path', config.get('path', '')))
if save_path:
form_data['savepath'] = save_path
except RenderError as e:
logger.error('Error setting path for {}: {}', entry['title'], e)
label = entry.render(entry.get('label', config.get('label', '')))
if label:
form_data['label'] = label # qBittorrent v3.3.3-
form_data['category'] = label # qBittorrent v3.3.4+
tags = entry.get('tags', []) + config.get('tags', [])
if tags:
try:
form_data['tags'] = entry.render(",".join(tags))
except RenderError as e:
logger.error('Error rendering tags for {}: {}', entry['title'], e)
form_data['tags'] = ",".join(tags)
add_paused = entry.get('add_paused', config.get('add_paused'))
if add_paused:
form_data['paused'] = 'true'
skip_check = entry.get('skip_check', config.get('skip_check'))
if skip_check:
form_data['skip_checking'] = 'true'
maxupspeed = entry.get('maxupspeed', config.get('maxupspeed'))
if maxupspeed:
form_data['upLimit'] = maxupspeed * 1024
maxdownspeed = entry.get('maxdownspeed', config.get('maxdownspeed'))
if maxdownspeed:
form_data['dlLimit'] = maxdownspeed * 1024
is_magnet = entry['url'].startswith('magnet:')
if task.manager.options.test:
logger.info('Test mode.')
logger.info('Would add torrent to qBittorrent with:')
if not is_magnet:
logger.info('File: {}', entry.get('file'))
else:
logger.info('Url: {}', entry.get('url'))
logger.info('Save path: {}', form_data.get('savepath'))
logger.info('Label: {}', form_data.get('label'))
logger.info('Tags: {}', form_data.get('tags'))
logger.info('Paused: {}', form_data.get('paused', 'false'))
logger.info('Skip Hash Check: {}', form_data.get('skip_checking', 'false'))
if maxupspeed:
logger.info('Upload Speed Limit: {}', form_data.get('upLimit'))
if maxdownspeed:
logger.info('Download Speed Limit: {}', form_data.get('dlLimit'))
continue
if self.check_torrent_exists(
entry.get('torrent_info_hash'), config.get('verify_cert')
):
continue
if not is_magnet:
if 'file' not in entry:
entry.fail('File missing?')
continue
if not os.path.exists(entry['file']):
tmp_path = os.path.join(task.manager.config_base, 'temp')
logger.debug('entry: {}', entry)
logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
continue
self.add_torrent_file(entry, form_data, config['verify_cert'])
else:
self.add_torrent_url(entry, form_data, config['verify_cert'])
@plugin.priority(120)
def on_task_download(self, task, config):
"""
Call download plugin to generate torrent files to load into
qBittorrent.
"""
config = self.prepare_config(config)
if not config['enabled']:
return
if 'download' not in task.config:
download = plugin.get('download', self)
download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
@plugin.priority(135)
def on_task_output(self, task, config):
"""Add torrents to qBittorrent at exit."""
if task.accepted:
config = self.prepare_config(config)
self.connect(config)
self.add_entries(task, config)
@event('plugin.register')
def register_plugin():
    """Register the qbittorrent output plugin with FlexGet at plugin-load time."""
    plugin.register(OutputQBitTorrent, 'qbittorrent', api_ver=2)
| Flexget/Flexget | flexget/plugins/clients/qbittorrent.py | Python | mit | 12,568 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
type 'pytest -v' to run u test series
"""
import codecs
import json
import os
import pytest
import tempfile
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import core
class TestHelpers:
    """Small static utilities shared by the test cases in this module."""

    @staticmethod
    def file_read(fd):
        """
        from current descriptor, create a new descriptor in read mode and read file content
        :return file content
        :rtype str
        """
        with codecs.open(fd.name, 'r', 'utf-8') as handle:
            return handle.read()

    @staticmethod
    def same_class(instance, should_cls):
        """True when *instance*'s class prints identically to *should_cls*."""
        return str(should_cls) == str(instance.__class__)

    @staticmethod
    def json_compare(a, b):
        """True when both values serialize to the same JSON text."""
        left = json.dumps(a)
        right = json.dumps(b)
        return left == right
class Fake(object):
    """Featureless placeholder class, used to check that foreign instances are rejected."""
@pytest.fixture()
def fixture_dict_data():
    # Canonical nested input data: scalars, one nested dict and a list of
    # dicts, used to exercise recursive core.Object construction.
    return dict(
        id=1,
        active=True,
        name='foobar',
        nested=dict(id=1, name='nested'),
        items=[dict(id=1, name='item1'),],
    )
def fixture_dict_data_check_matching(o, data):
    """Assert that core.Object *o* mirrors the plain dict *data*, both through
    its ``__dict__`` contents and through direct attribute access."""
    assert o
    # check __dict__
    assert isinstance(o.__dict__, dict)
    assert len(o.__dict__.keys()) == len(data.keys())
    assert o.__dict__['id'] == data['id']
    assert o.__dict__['active'] == data['active']
    assert o.__dict__['name'] == data['name']
    # nested dicts must have been converted to core.Object instances
    assert TestHelpers.same_class(o.__dict__['nested'], core.Object)
    assert len(o.__dict__['nested'].__dict__.keys()) == len(data['nested'].keys())
    assert o.__dict__['nested'].__dict__['id'] == data['nested']['id']
    assert o.__dict__['nested'].__dict__['name'] == data['nested']['name']
    # dicts inside lists are converted too
    assert isinstance(o.__dict__['items'], list)
    assert len(o.__dict__['items']) == len(data['items'])
    assert TestHelpers.same_class(o.__dict__['items'][0], core.Object)
    assert o.__dict__['items'][0].__dict__['id'] == data['items'][0]['id']
    assert o.__dict__['items'][0].__dict__['name'] == data['items'][0]['name']
    # check attrs
    assert hasattr(o, 'id')
    assert hasattr(o, 'active')
    assert hasattr(o, 'name')
    assert hasattr(o, 'nested')
    assert hasattr(o, 'items')
    assert o.id == data['id']
    assert o.active == data['active']
    assert o.name == data['name']
    assert TestHelpers.same_class(o.nested, core.Object)
    assert hasattr(o.nested, 'id')
    assert hasattr(o.nested, 'name')
    assert o.nested.id == data['nested']['id']
    assert o.nested.name == data['nested']['name']
    assert isinstance(o.items, list)
    assert len(o.items) == len(data['items'])
    assert hasattr(o.items[0], 'id')
    assert hasattr(o.items[0], 'name')
    assert o.items[0].id == data['items'][0]['id']
    assert o.items[0].name == data['items'][0]['name']
@pytest.fixture()
def fixture_json_data():
    # JSON form of the same payload as fixture_dict_data, for (un)jsonify tests.
    return json.dumps(dict(
        id=1,
        active=True,
        name='foobar',
        nested=dict(id=1, name='nested'),
        items=[dict(id=1, name='item1')],
    ))
@pytest.fixture()
def fixture_repr_data():
    # Expected repr() of core.o(fixture_dict_data): nested values shown as Objects.
    return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \
           " items: [<class 'core.Object'>, 2 attrs: id: 1, name: 'item1']," \
           " name: 'foobar', nested: <class 'core.Object'>, 2 attrs: id: 1, name: 'nested'"
@pytest.fixture()
def fixture_str_data():
    # Expected str() of core.o(fixture_dict_data): nested values shown as plain dicts.
    return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \
           " items: [{'id': 1, 'name': 'item1'}], name: 'foobar'," \
           " nested: {'id': 1, 'name': 'nested'}"
@pytest.fixture()
def fixture_update_merge_data():
    # Triplet for update/merge tests: 'data' updated/merged with 'data2'.
    # update() replaces wholesale (result == data2), merge() deep-combines
    # (result == 'merge').
    return {
        'data': {'foo': {'bar': {'message': 'foobar'}}},
        'data2': {
            'foo': {'bar': {'color': 'green'}},
            'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}},
        },
        'merge': {
            'foo': {'bar': {'message': 'foobar', 'color': 'green'}},
            'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}},
        },
    }
@pytest.fixture()
def fixture_config_file(request):
    # Write a throwaway two-section INI file and hand back its (closed) descriptor;
    # tests only use fd.name. A finalizer removes the file after the test.
    fd = tempfile.NamedTemporaryFile(mode='w', suffix='.ini', delete=False)
    with fd:
        fd.write("""
[foo]
foo1=Fee
foo2=Fie
[bar]
bar1=Foe
bar2=Foo
""")
    def delete():
        # NOTE(review): os.remove only runs when fd was still open, so the temp
        # file is left behind in the normal (already-closed) case -- confirm intent.
        if not fd.closed:
            fd.close()
            os.remove(fd.name)
    request.addfinalizer(delete)
    return fd
@pytest.fixture()
def fixture_config_file_expected_data():
    # Dict representation expected after parsing the INI from fixture_config_file.
    return dict(
        foo=dict(foo1='Fee', foo2='Fie'),
        bar=dict(bar1='Foe', bar2='Foo'),
    )
class Test01ObjectContract():
    """core.Object.of_class must reject anything that is not a core.Object."""
    def test_00_of_class_ko(self):
        assert not core.Object.of_class(None)
        assert not core.Object.of_class(False)
        assert not core.Object.of_class(True)
        assert not core.Object.of_class(1)
        assert not core.Object.of_class('a')
        assert not core.Object.of_class(object())
        assert not core.Object.of_class(Fake())
class Test02ObjectConstructor():
    """Construction of core.Object from nothing, dicts and keyword arguments."""
    def test_00_contract_ko(self):
        # Non-dict payloads must be rejected with the contract assertion.
        # NOTE(review): pytest.raises(message=...) was removed in pytest 4.0;
        # confirm the pinned pytest version or migrate to match=/custom check.
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
            core.Object(False)
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
            core.Object(True)
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
            core.Object(1)
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
            core.Object('a')
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
            core.Object(object())
    def test_01_empty(self):
        # None and an empty dict are both valid "empty" constructions.
        o = core.Object()
        assert o
        # alias
        o = core.o()
        assert o
        o = core.Object(None)
        assert o
        o = core.Object(dict())
        assert o
    def test_02_of_class(self):
        assert core.Object.of_class(core.Object())
        assert core.Object.of_class(core.o())
    @pytest.mark.usefixtures('fixture_dict_data')
    def test_03_from_dict(self, fixture_dict_data):
        fixture_dict_data_check_matching(core.Object(fixture_dict_data), fixture_dict_data)
        fixture_dict_data_check_matching(core.o(fixture_dict_data), fixture_dict_data)
    # get_dict: will be used for following test so at serie start
    @pytest.mark.usefixtures('fixture_dict_data')
    def test_04_get_dict(self, fixture_dict_data):
        o = core.o(fixture_dict_data)
        assert o.get_dict() == fixture_dict_data
    def test_05_kwargs(self):
        # Keyword arguments populate the object; they win over the dict payload.
        o = core.o(id=1, name='kwarg')
        assert o.get_dict() == dict(id=1, name='kwarg')
        o = core.o(dict(), id=1, name='kwarg')
        assert o.get_dict() == dict(id=1, name='kwarg')
        o = core.o(dict(description='from dict'), id=1, name='kwarg')
        assert o.get_dict() == dict(description='from dict', id=1, name='kwarg')
class Test02ObjectUpdateContent():
    """Mutation of core.Object: setattr, update() (replace) and merge() (deep-combine)."""
    @pytest.mark.usefixtures('fixture_dict_data')
    def test_00_setattr(self, fixture_dict_data):
        o = core.o(fixture_dict_data)
        # change existing attribute
        o.name = 'changed'
        assert o.name == 'changed'
        o.nested.name = 'changed2'
        assert o.nested.name == 'changed2'
        o.items[0].name = 'changed3'
        assert o.items[0].name == 'changed3'
        # new attribute
        o.description = 'description'
        assert o.description == 'description'
        o.nested2 = core.o(dict(id=2, name='nested2'))
        assert o.nested2.id == 2
        assert o.nested2.name == 'nested2'
        o.nested3 = core.o()
        o.nested3.id = 3
        assert o.nested3.id == 3
        o.items2 = [core.o(dict(id=2, name='item2'))]
        assert o.items2[0].id == 2
        assert o.items2[0].name == 'item2'
    @pytest.mark.usefixtures('fixture_update_merge_data')
    def test_01_update(self, fixture_update_merge_data):
        # update() accepts only dicts or Objects, and replaces content wholesale.
        # NOTE(review): pytest.raises(message=...) was removed in pytest 4.0 -- confirm version.
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_SELF):
            core.o().update(1)
        data = fixture_update_merge_data['data']
        data2 = fixture_update_merge_data['data2']
        o = core.o(data)
        assert o.get_dict() == data
        o.update(data2)
        assert o.get_dict() == data2
        assert core.o(data).update(data2).get_dict() == data2  # chained style
        o = core.o()
        assert o.get_dict() == {}
        o.update(data)
        assert o.get_dict() == data
        o.update(data2)
        assert o.get_dict() == data2
        assert core.o().update(data).update(data2).get_dict() == data2  # chained style
        o = core.o(data)
        o.update(core.o(data2))
        assert o.get_dict() == data2
        assert core.o(data).update(core.o(data2)).get_dict() == data2  # chained style
        o = core.o()
        assert o.get_dict() == {}
        o.update(core.o(data))
        assert o.get_dict() == data
        o.update(core.o(data2))
        assert o.get_dict() == data2
        assert core.o().update(core.o(data)).update(core.o(data2)).get_dict() == data2  # chained style
    @pytest.mark.usefixtures('fixture_update_merge_data')
    def test_02_merge(self, fixture_update_merge_data):
        # merge() deep-combines nested content instead of replacing it.
        with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_SELF):
            core.o().merge(1)
        data = fixture_update_merge_data['data']
        data2 = fixture_update_merge_data['data2']
        merge = fixture_update_merge_data['merge']
        o = core.o(data)
        assert o.get_dict() == data
        o.merge(data2)
        assert o.get_dict() == merge
        assert core.o(data).merge(data2).get_dict() == merge  # chained style
        o = core.o()
        o.merge(data)
        assert o.get_dict() == data
        o.merge(data2)
        assert o.get_dict() == merge
        assert core.o().merge(data).merge(data2).get_dict() == merge  # chained style
        o = core.o(data)
        o.merge(core.o(data2))
        assert o.get_dict() == merge
        assert core.o(data).merge(core.o(data2)).get_dict() == merge  # chained style
        o = core.o()
        assert o.get_dict() == {}
        o.merge(core.o(data))
        assert o.get_dict() == data
        o.merge(core.o(data2))
        assert o.get_dict() == merge
        assert core.o().merge(core.o(data)).merge(core.o(data2)).get_dict() == merge
class Test03Json():
    """Round-tripping core.Object through JSON with jsonify()/unjsonify()."""
    @pytest.mark.usefixtures('fixture_dict_data')
    @pytest.mark.usefixtures('fixture_json_data')
    def test_00_jsonify(self, fixture_dict_data, fixture_json_data):
        assert TestHelpers.json_compare(core.o(fixture_dict_data).jsonify(), fixture_json_data)
    @pytest.mark.usefixtures('fixture_dict_data')
    @pytest.mark.usefixtures('fixture_json_data')
    def test_01_unjsonify(self, fixture_dict_data, fixture_json_data):
        # Non-string input is rejected; valid JSON rebuilds the original dict.
        with pytest.raises(AssertionError):
            core.Object.unjsonify(1)
        assert core.Object.unjsonify(fixture_json_data).get_dict() == fixture_dict_data
        assert core.unjsonify(fixture_json_data).get_dict() == fixture_dict_data
class Test04ObjectMagic():
    """Dunder protocol of core.Object: repr, str, ==/!=, + and +=."""
    @pytest.mark.usefixtures('fixture_dict_data')
    @pytest.mark.usefixtures('fixture_repr_data')
    def test_00_repr(self, fixture_dict_data, fixture_repr_data):
        assert repr(core.o(fixture_dict_data)) == fixture_repr_data
    @pytest.mark.usefixtures('fixture_dict_data')
    @pytest.mark.usefixtures('fixture_str_data')
    def test_01_str(self, fixture_dict_data, fixture_str_data):
        assert str(core.o(fixture_dict_data)) == fixture_str_data
    def test_02_eq_ne(self):
        # Equality is by content, not identity.
        o1 = core.o(id=1, name='foobar')
        o2 = core.o(id=1, name='foobar')
        o3 = core.o(id=3, name='foobar3')
        assert o1 == o2
        assert o1 != o3
    @pytest.mark.usefixtures('fixture_update_merge_data')
    def test_03_add(self, fixture_update_merge_data):
        # '+' behaves like merge(), producing a new object.
        data = fixture_update_merge_data['data']
        data2 = fixture_update_merge_data['data2']
        merge = fixture_update_merge_data['merge']
        assert (core.o(data) + core.o(data2)).get_dict() == merge
    @pytest.mark.usefixtures('fixture_update_merge_data')
    def test_04_iadd(self, fixture_update_merge_data):
        # '+=' merges in place.
        data = fixture_update_merge_data['data']
        data2 = fixture_update_merge_data['data2']
        merge = fixture_update_merge_data['merge']
        o = core.o(data)
        o += core.o(data2)
        assert o.get_dict() == merge
class Test05ObjectGetContent():
    """Reading content back: get(), dotted-path get_dot() and attrs()."""
    def test_00_get(self):
        # get() returns None for missing keys and Objects for nested dicts.
        data = {'foo': {'bar': {'message': 'foobar'}}}
        o = core.o(data)
        assert o.get('ko') is None
        assert TestHelpers.same_class(o.get('foo'), core.Object)
        assert o.get('foo').get_dict() == {'bar': {'message': 'foobar'}}
        assert TestHelpers.same_class(o.get('foo').get('bar'), core.Object)
        assert o.get('foo').get('bar').get_dict() == {'message': 'foobar'}
        assert o.get('foo').get('bar').get('message') == 'foobar'
    def test_01_get_dot(self):
        # get_dot() resolves 'a.b.c' paths, returning None on any missing segment.
        data = {'foo': {'bar': {'message': 'foobar'}}}
        o = core.o(data)
        assert o.get_dot('ko') is None
        assert o.get_dot('ko.ko') is None
        assert TestHelpers.same_class(o.get_dot('foo'), core.Object)
        assert o.get_dot('foo').get_dict() == {'bar': {'message': 'foobar'}}
        assert TestHelpers.same_class(o.get_dot('foo.bar'), core.Object)
        assert o.get_dot('foo.bar').get_dict() == {'message': 'foobar'}
        assert o.get_dot('foo.bar.message') == 'foobar'
    @pytest.mark.usefixtures('fixture_dict_data')
    def test_02_attrs(self, fixture_dict_data):
        # attrs() lists attribute names sorted alphabetically.
        o = core.o(fixture_dict_data)
        assert o.attrs() == sorted(fixture_dict_data.keys())
class Test06ObjectExtra():
    """Extras: populating an Object from an INI config file via read_cfg()."""
    @pytest.mark.usefixtures('fixture_config_file')
    @pytest.mark.usefixtures('fixture_config_file_expected_data')
    def test_00_cfg_read_get(self, fixture_config_file, fixture_config_file_expected_data):
        # Non-string path is rejected; a valid path yields one attr per INI section.
        with pytest.raises(AssertionError):
            core.o().read_cfg(1)
        assert core.o().read_cfg(fixture_config_file.name).get_dict() == fixture_config_file_expected_data
| pyseed/objify | objify/test/test_core.py | Python | mit | 13,959 |
#第四集(包含部分文件3.py和部分第二集)
# courses=['History','Math','Physics','Compsci']#此行代码在Mutable之前都要打开
# print(courses)
# courses.append('Art')#在最后添加一个元素
# courses.insert(0,'English')#在0的位置添加一个元素
# courses_2=['Chinese','Education']
# courses.insert(1,courses_2)#看看这条代码与下面两条代码有什么不同
# courses.append(courses_2)
# courses.extend(courses_2)
# #用pop删除和用remove删除可以详见3.py
# # courses.remove('Math')#删除一个元素
# popped=courses.pop()#删除一个元素并将该元素赋值给popped (括号内无数字则默认最后一个)
# print(popped)#输出被删除的元素
# courses.reverse()#将元素倒叙
# courses.sort()#排序 按开头字母的顺序 数字排在字母前
# print(courses)
# courses.sort(reverse=True)#按顺序倒叙(若=False则无效)
# print(courses)
# sorted_courses=sorted(courses)
# print(sorted_courses)
# alphabet=['DA1','SA2','AD3','3AD']
# alphabet.sort()
# print(alphabet)
# nums=[3,5,1,4,2]
# nums.sort()
# print(nums)
# print(min(nums))#输出最小数
# print(max(nums))#输出最大数
# print(sum(nums))#输出总和
# #中文不知道是什么规则
# Chinese=['啊了','吧即','啦']
# Chinese.sort()
# print(Chinese)
# print(courses.index('Math'))#查找某元素在列表中的位置
# print('Art' in courses)#True则表示该元素存在于列表,False则是不存在
#for和in语言
# for item in courses: #将courses中的元素一个一个输出
# print(item)
# #输出元素位置和元素
# for course in enumerate(courses):
# print(course)
# for index,course in enumerate(courses):
# print(index,course)
# for index,course in enumerate(courses,start=1):
# print(index,course)
# courses_str=' - '.join(courses)#将' - '插入courses中输出
# new_list=courses_str.split(' - ')#将' - '从courses_str中删除
# print(courses_str)
# print(new_list)
# #Mutable (可变的)
# list_1=['History','Math','Physics','Compsci']
# list_2=list_1
# print(list_1)
# print(list_2)
# list_1[0]='Art'
# print(list_1)
# print(list_2)
# #Immutable (不可变的)(这里很神奇,视频上不可以但是我可以)
# tuple_1=['History','Math','Physics','Compsci']
# tuple_2=tuple_1
# print(tuple_1)
# print(tuple_2)
# tuple_1[0]='Art'
# print(tuple_1)
# print(tuple_2)
# #Sets
# cs_courses={'History', 'Math', 'Physics', 'Compsci','Math'}#用大括号则会将两个相同的元素只输出前一个
# art_courses={'History', 'Math', 'Art', 'Design'}
# print(cs_courses)
# print(cs_courses.intersection(art_courses))#输出两个列表中相同的元素
# print(cs_courses.difference(art_courses))#输出两个列表中不相同的元素
# print(cs_courses.union(art_courses))#将两个列表合并(每次运行顺序都不同)
# Empty lists: literal and constructor forms are equivalent
empty_list=[]
empty_list=list()
# Empty tuples
empty_tuple=()
empty_tuple=tuple()
# Empty sets
empty_set={} # wrong: {} creates an empty dict, not a set
empty_set=set() # correct: set() is the only way to make an empty set
| Tiger-C/python | python教程/第四集.py | Python | mit | 3,007 |
import os
from flask import Flask, Response, request, url_for
import psycopg2
import urlparse
import plivo
import plivoxml
# Plivo credentials and default phone numbers. These are placeholder values;
# the handlers below read the real values from the environment first
# (os.environ.get(NAME, fallback)).
AUTH_ID = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
AUTH_TOKEN = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
CALLER_ID = '+12345678901'
BOX_ID = '+12345678901'
MY_URL = 'http://morning-ocean-4669.herokuapp.com/report/'
app = Flask(__name__)
@app.route('/response/speak/', methods=['GET'])
def speak():
    """Return a Plivo XML document that speaks a fixed greeting."""
    xml_doc = plivoxml.Response()
    xml_doc.addSpeak(
        "Congratulations! You just made a text to speech app on Plivo cloud!",
        loop=1,
        language="en-US",
        voice="WOMAN",
    )
    return Response(str(xml_doc), mimetype='text/xml')
@app.route('/send', methods=['GET', 'POST'])
def send():
    """Send a fixed SMS through the Plivo REST API on POST; GET returns an empty XML response."""
    # Environment variables override the module-level placeholder values.
    auth_id = os.environ.get("AUTH_ID", AUTH_ID)
    auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
    caller_id = os.environ.get("CALLER_ID", CALLER_ID)
    box_id = os.environ.get("BOX_ID", BOX_ID)
    my_url = os.environ.get("MY_URL", MY_URL)
    params = {
        'src': caller_id, # Sender's phone number with country code
        'dst' : box_id, # Receiver's phone Number with country code
        'text' : u"Hello, how are you?", # Your SMS Text Message - English
        'url' : my_url, # The URL to which with the status of the message is sent
        'method' : 'POST' # The method used to call the url
    }
    if request.method == 'GET':
        # GET is a no-op: just an empty Plivo XML document.
        response = plivoxml.Response()
        #response.addSpeak(auth_id + auth_token + caller_id + box_id + my_url)
    elif request.method == 'POST':
        p = plivo.RestAPI(auth_id, auth_token)
        # send_message returns a (status, body) tuple which is stringified below.
        response = p.send_message(params)
    return Response(str(response), mimetype='text/xml')
@app.route('/call', methods=['GET', 'POST'])
def call():
    """Answer with XML speaking "hello <client>"; outbound-call code is commented out."""
    auth_id = os.environ.get("AUTH_ID", AUTH_ID)
    auth_token = os.environ.get("AUTH_TOKEN", AUTH_TOKEN)
    caller_id = os.environ.get("CALLER_ID", CALLER_ID)
    box_id = os.environ.get("BOX_ID", BOX_ID)
    my_url = os.environ.get("MY_URL", MY_URL)
    # NOTE(review): client is None when the 'client' parameter is absent, so
    # "hello "+client below raises TypeError (HTTP 500) -- confirm callers always send it.
    client = request.values.get('client')
    # params for an outbound call; currently unused since make_call is commented out.
    params = {
        'from': caller_id, # Caller Id
        'to' : box_id, # User Number to Call
        'answer_url' : my_url+"call",
        'time_limit': 80
    }
    if request.method == 'GET':
        response = plivoxml.Response()
        response.addSpeak("hello "+client)
        #response.addSpeak(auth_id + auth_token + caller_id + box_id + my_url)
        #p = plivo.RestAPI(auth_id, auth_token)
        #response = p.make_call(params)
    elif request.method == 'POST':
        response = plivoxml.Response()
        response.addSpeak("hello "+client)
        #p = plivo.RestAPI(auth_id, auth_token)
        #response = p.make_call(params)
    return Response(str(response), mimetype='text/xml')
@app.route("/initdb", methods=['GET', 'POST'])
def initdb():
response = plivoxml.Response()
client = request.values.get('client')
if client == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
try:
cur.execute("CREATE TABLE IF NOT EXISTS test (id serial PRIMARY KEY, num integer, data varchar);")
cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (100, "abc'def"))
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.commit()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/writedb", methods=['GET', 'POST'])
def writedb():
response = plivoxml.Response()
client = request.values.get('client')
text = request.values.get('text')
if client == None:
return Response(str(response), mimetype='text/xml')
if text == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
#cur.execute("UPDATE test SET data = 'abcd' WHERE num = 100;")
try:
SQL = "UPDATE test SET data = %s WHERE num = 100;"
data = (""+text+"",)
cur.execute(SQL, data)
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.commit()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/readdb", methods=['GET', 'POST'])
def readdb():
response = plivoxml.Response()
client = request.values.get('client')
if client == None:
return Response(str(response), mimetype='text/xml')
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
try:
cur.execute("SELECT * FROM test;")
response.addSpeak(cur.fetchone())
except Exception, e:
response.addSpeak(e)
cur.close()
conn.close()
return Response(str(response), mimetype='text/xml')
@app.route("/writefile", methods=['GET', 'POST'])
def writefile():
response = plivoxml.Response()
client = request.values.get('client')
try:
file = open("/tmp/foo.txt", "w")
file.write('this is a line of text')
file.close()
read_file = open("/tmp/foo.txt", 'r')
text = read_file.read()
read_file.close()
response.addSpeak(text)
except Exception, e:
response.addSpeak(e)
return Response(str(response), mimetype='text/xml')
@app.route("/readfile", methods=['GET', 'POST'])
def readfile():
response = plivoxml.Response()
client = request.values.get('client')
try:
read_file = open("/tmp/foo.txt", 'r')
text = read_file.read()
read_file.close()
response.addSpeak(text)
except Exception, e:
response.addSpeak(e)
return Response(str(response), mimetype='text/xml')
@app.route("/hello", methods=['GET', 'POST'])
def hello():
response = plivoxml.Response()
client = request.values.get('client')
response.addSpeak("hello "+client)
return Response(str(response), mimetype='text/xml')
if __name__ == '__main__':
    # Bind to the port Heroku assigns via the PORT env var; default 5000 locally.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| kmichal2/plivo | app.py | Python | mit | 7,016 |
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django_countries import CountryField
from django.contrib.localflavor.us.models import *
from django.contrib.localflavor.us.us_states import STATE_CHOICES
# https://docs.djangoproject.com/en/1.3/ref/contrib/localflavor/#united-states-of-america-us
# TODO: django scheduler
# TODO: confirm what's in User model
# User model?
# first name
# last name
# auth_mode (custom, facebook, tumblr?)
class Profile(models.Model):
    """
    Profile extends Django User Model with a school and a watch list.
    todo: Selection should eventually be intelligent based on location
    """
    # One profile per user (unique FK acts as a one-to-one relation).
    user = models.ForeignKey(User, unique=True, verbose_name='user')
    school = models.ForeignKey('School', blank=True, null=True)
    # NOTE(review): 'WatchList' model is not defined in this chunk -- confirm it exists.
    watch_list = models.ForeignKey('WatchList', blank=True, null=True, related_name="profile_watch_list")
    # class Meta:
    #     verbose_name_plural = 'Profiles'
    #     ordering = ('user',)
    #
    # def __unicode__(self):
    #     return self.user
    #
    # @models.permalink
    # def get_absolute_url(self):
    #     return ('view_forum_category', (self.forum.slug, self.slug,))
class School(models.Model):
    """A school with optional address, contact and district number."""
    name = models.CharField(max_length=200, unique=True)
    address = models.ForeignKey('Address', blank=True, null=True)
    contact = models.ForeignKey('Contact', blank=True, null=True)
    district = models.PositiveIntegerField(blank=True, null=True)
    def __unicode__(self):
        return self.name
class Address(models.Model):
    """Street address with optional geocoded location."""
    street1 = models.CharField(max_length=250)
    street2 = models.CharField(max_length=250, blank=True, null=True)
    city = models.CharField(max_length=100, default='Oakland')
    state = USStateField(choices=STATE_CHOICES, default='CA', blank=True, null=True)
    country = CountryField(blank=True, null=True, default='US')
    zipcode = models.CharField(max_length=10, blank=True, null=True)
    # GIS is computed as a post-save process, so must
    # be able to be null on first save
    location = models.ForeignKey('GIS', blank=True, null=True)
    def __unicode__(self):
        return self.street1
class GIS(models.Model):
    """
    GIS location data for events, schools,
    bus stops, and bart stops
    """
    # Decimal degrees; nullable because coordinates are filled in post-save.
    latitude = models.FloatField(blank=True, null=True)
    longitude = models.FloatField(blank=True, null=True)
class EventDate(models.Model):
    """
    A single occurrence of a program: start time plus duration in minutes.
    Can this be replaced with a django scheduler?
    """
    date = models.DateTimeField()
    duration_mins = models.PositiveIntegerField(default=60)
    def __unicode__(self):
        return self.date.isoformat()
    class Meta:
        # Default queryset order: chronological.
        ordering = ['date']
class Contact(models.Model):
    """
    Contact info for projects and events
    """
    first_name = models.CharField(max_length=40, blank=True, null=True)
    last_name = models.CharField(max_length=40, blank=True, null=True)
    role = models.CharField(max_length=200, blank=True, null=True)
    phone = PhoneNumberField(blank=True, null=True)
    # Whether this phone number may receive SMS messages.
    smsok = models.BooleanField(default=False)
    tdd = PhoneNumberField(max_length=20, blank=True, null=True)
    fax = PhoneNumberField(max_length=20, blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    web = models.URLField(blank=True, null=True)
    def __unicode__(self):
        # Prefer email, then phone, then web; fall back to an id-based label.
        if self.email:
            return self.email
        if self.phone:
            return self.phone
        if self.web:
            return self.web
        return '<Contact at 0x%x>' % (id(self))
class Category(models.Model):
    """
    Moderated set of categories for events
    """
    name = models.CharField(max_length=60, unique=True)
    def __unicode__(self):
        return self.name
class Tag(models.Model):
    """
    Moderated set of subcats for events
    """
    name = models.CharField(max_length=60, unique=True)
    def __unicode__(self):
        return self.name
class Organization(models.Model):
    """
    An organization that offers Programs
    """
    name = models.CharField(max_length=250, unique=True)
    about = models.TextField(blank=True, null=True)
    headoffice = models.ForeignKey('Address', related_name='office')
    contact = models.ForeignKey('Contact')
    # Bookkeeping timestamps: set on create / every save respectively.
    date_added = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.name
class Program(models.Model):
    """
    Program info
    """
    # Core Details
    name = models.CharField(max_length=200, blank=True, null=True)
    summary = models.TextField(blank=True, null=True)
    about = models.TextField()
    organization = models.ForeignKey('Organization', blank=True, null=True)
    address = models.ForeignKey('Address')
    notes = models.TextField(blank=True, null=True)
    primary_contact = models.ForeignKey('Contact', null=True)
    # Time
    events = models.ManyToManyField('EventDate')
    # Attendee Details
    cost = models.FloatField(default=0.00)
    agemin = models.PositiveIntegerField(default=13)
    agemax = models.PositiveIntegerField(default=18)
    registration_needed = models.BooleanField(blank=True, default=False)
    # validate required if reg_needed
    registration_due_by = models.DateTimeField(blank=True, null=True)
    registration_instructions = models.TextField(blank=True, null=True)
    # Organization
    categories = models.ManyToManyField('Category')
    tags = models.ManyToManyField('Tag')
    # todo: make subcat intelligent based on cat selected
    # Meta
    is_active = models.BooleanField(default=False)
    program_status = models.ForeignKey('ProgramStatus', null=True) # eg pending approval, approved, denied, need verifications, etc.
    program_type = models.ForeignKey('ProgramType', null=True) # eg drop-in, register
    rank = models.IntegerField(default=-1)
    # NOTE(review): field name is a typo for "capacity"; renaming would
    # require a schema migration, so it is documented rather than changed.
    capcity = models.PositiveIntegerField(null=True) # who's going, how many total can attend
    wait_list = models.ForeignKey('WaitList', blank=True, null=True, related_name="program_wait_list")
    date_added = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)
    # holding
    # logo = models.ImageField()
    # attending = models.ForeignKey(User)
    def next_event(self):
        """Return the datetime of the next future EventDate, or None."""
        import datetime
        events = self.events.filter(date__gt=datetime.datetime.now()).order_by('date')
        if events:
            return events[0].date
        return None
    def time_until(self):
        # FIXME: placeholder implementation -- presumably meant to return
        # the time remaining until next_event(); confirm intended format.
        return "hi"
    def __unicode__(self):
        return self.name
class ProgramStatus(models.Model):
    """Moderation state of a Program, e.g. pending approval, approved, denied
    (see Program.program_status)."""
    program_status = models.CharField(max_length=200)
    description = models.TextField(blank=True, null=True)
class ProgramType(models.Model):
    """Kind of Program, e.g. drop-in or register (see Program.program_type)."""
    program_type = models.CharField(max_length=200)
    description = models.TextField(blank=True, null=True)
class WatchList(models.Model):
    """Join row recording that a Profile is watching a Program."""
    # NOTE(review): no Profile model is visible in this file -- it must be
    # defined elsewhere, or this FK is dangling.
    profile = models.ForeignKey('Profile', related_name="watchlist_profile")
    program = models.ForeignKey('Program', related_name="watchlist_program")
    date_added = models.DateTimeField(auto_now_add=True)
class WaitList(models.Model):
    """A Profile queued for a Program (see Program.wait_list)."""
    profile = models.ForeignKey('Profile', related_name="waitlist_profile")
    program = models.ForeignKey('Program', related_name="waitlist_program")
    date_added = models.DateTimeField(auto_now_add=True)
    # Place in line; bumped by save() below.
    position = models.PositiveIntegerField(default=0)
    def save(self, *args, **kwargs):
        """Increment ``position`` and persist the row.

        ``*args``/``**kwargs`` are accepted and forwarded so Django's
        standard save options (``force_insert``, ``using``, ...) keep
        working; the previous signature silently broke them.
        """
        # NOTE(review): this bumps position on *every* save, not only on
        # first insert -- verify that is the intended behaviour.
        self.position += 1
        super(WaitList, self).save(*args, **kwargs)
class PublicTransport(models.Model):
    ''' A transit stop near an Address.  Pull data with APIs?'''
    # (stored code, human label) pairs for pt_type below.
    TRANSPORT_CHOICES = (
        ('B', 'Bus'),
        ('T', 'Train'),
        ('LR', 'Light Rail'),
    )
    company = models.CharField(max_length=100)
    line = models.CharField(max_length=40)
    name = models.CharField(max_length=40)
    address = models.ForeignKey('Address')
    pt_type = models.CharField(max_length=10, choices=TRANSPORT_CHOICES)
# -- Hoilding ---
# class Comment(models.Model):
# """
# A comment left by a user on an event
# """
# user = models.ForeignKey(User)
# program = models.ForeignKey(Program)
# flagged = models.BooleanField(default=False)
# date = models.DateTimeField(auto_now_add=True)
# text = models.CharField(max_length=140)
#
# class Meta:
# ordering = ["date"]
#
# class Admin:
# pass
#
# def __unicode__(self):
# return self.text
| jedp/oakland_pm | core/models.py | Python | mit | 8,624 |
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import user_passes_test
from Aluno.models import Aluno
def check_aluno_exist(user):
    """Return True when ``user`` is logged in and owns an Aluno record.

    Used below as the predicate for the ``aluno_exist`` view decorator.
    """
    if not user.is_authenticated():
        return False
    try:
        # Raises Aluno.DoesNotExist when the user has no Aluno row.
        # (The fetched object itself is not needed, only its existence.)
        user.aluno_set.get()
        return True
    except Aluno.DoesNotExist:
        return False

# View decorator built directly from the predicate above; the previous
# ``lambda u: check_aluno_exist(u)`` wrapper added nothing.
aluno_exist = user_passes_test(check_aluno_exist)
| arruda/amao | AMAO/apps/Aluno/views/utils.py | Python | mit | 400 |
import collections
from sets import Set
from Drawer import Drawer
class Cultivar():
    """One plant variety: identity, pedigree links, and grid placement."""
    def __init__(self, id, name, year):
        # Identity as read from the input rows.
        self.id = id
        self.name = name
        self.year = year
        # Pedigree links; the Generator wires these up after construction.
        self.parent1 = self.parent2 = None
        # Layout state: row/col are 0-based indices into the Grid;
        # rowAmount is how many grid rows this node spans.
        self.row = 0
        self.col = None
        self.rowAmount = None
class Grid():
    """
    Occupancy grid used to lay out pedigree nodes.  Rows grow downward,
    columns are centred on ``middleColumn``, and the grid doubles in
    width whenever a placement walks off either edge.
    """
    # NOTE(review): middleColumn is *class* state mutated by doubleGrid(),
    # so widening one grid affects every Grid constructed afterwards --
    # safe only while a single Grid is live at a time.
    middleColumn = 5
    def initGrid(self, grid, rowNum, colNum, default):
        """Append ``rowNum`` rows of ``colNum`` copies of ``default`` to ``grid``."""
        for i in range(rowNum):
            row = []
            for j in range(colNum):
                row.append(default)
            grid.append(row)
    def __init__(self, row):
        self.rowTotalNumber = row
        self.colTotalNumber = Grid.middleColumn * 2 + 1
        self.grid = []
        self.initGrid(self.grid, self.rowTotalNumber, self.colTotalNumber, 0)
    # only double columns
    def doubleGrid(self):
        """Double the column capacity, re-centring the existing contents."""
        colTotalNumber = self.colTotalNumber
        middleColumn = Grid.middleColumn
        Grid.middleColumn += Grid.middleColumn
        self.colTotalNumber = Grid.middleColumn * 2 + 1
        grid = []
        self.initGrid(grid, self.rowTotalNumber, self.colTotalNumber, 0)
        for rowIndex in range(self.rowTotalNumber):
            for colIndex in range(colTotalNumber):
                newColIndex = middleColumn + colIndex
                grid[rowIndex][newColIndex] = self.grid[rowIndex][colIndex]
        self.grid = grid
    def isAvailable(self, row, rowAmount, col):
        """True when column ``col`` is free for ``rowAmount`` rows from ``row``."""
        for i in range(rowAmount):
            if self.grid[row + i][col] != 0:
                return False
        return True
    def occupyGrid(self, row, rowAmount, col, id):
        """Mark ``rowAmount`` cells of column ``col`` as taken by ``id``."""
        for i in range(rowAmount):
            self.grid[row + i][col] = id
    def autoOccupy(self, row, rowAmount, col, id, offset=0):
        """
        Place ``id`` in the free column nearest ``col``, probing in the
        zig-zag order 0, -1, +1, -2, +2, ...; widens the grid when the
        probe leaves the current bounds.  Returns the column used.
        """
        targetColumn = col + offset
        if targetColumn < 0 or targetColumn >= self.colTotalNumber:
            self.doubleGrid()
            # Re-centre on the widened grid.  Floor division (//) keeps
            # this an int index; the original "/" breaks under Python 3,
            # where true division yields a float.
            col += (Grid.middleColumn // 2)
            return self.autoOccupy(row, rowAmount, col, id, offset)
        else:
            if self.isAvailable(row, rowAmount, targetColumn):
                self.occupyGrid(row, rowAmount, targetColumn, id)
                return targetColumn
            else:
                if offset < 0:
                    return self.autoOccupy(row, rowAmount, col, id, -offset)
                elif offset > 0:
                    return self.autoOccupy(row, rowAmount, col, id, -offset - 1)
                elif offset == 0:
                    return self.autoOccupy(row, rowAmount, col, id, -1)
    def occupy(self, cultivar, col):
        """Place ``cultivar`` (spanning its rowAmount rows) near column ``col``."""
        return self.autoOccupy(cultivar.row, cultivar.rowAmount, col, cultivar.id)
class Generator():
def formatNone(self, value):
if value == None:
return None
if value == '' or value == 'None' or value == 'none':
return None
try:
return int(value)
except:
print "Year value or cultivar ID cannot be converted to integer"
exit(1)
# data is rows
# each row is [cultivar.ID, cultivar.Name, cultivar.Year, cultivar.Parent1, cultivar.Parent2]
def __init__(self, data, outputFolder):
self.outputFolder = outputFolder
self.data = data
self.outputFolder = outputFolder
self.nameIDMap = {}
self.IDCultivarMap = {}
self.idRowMap = {}
self.treeSet = None
for row in data:
self.idRowMap[row[0]] = row
for row in data:
id = row[0]
name = row[1]
year = self.formatNone(row[2])
parent1 = self.formatNone(row[3])
parent2 = self.formatNone(row[4])
self.nameIDMap[name] = id
cultivar = self.IDCultivarMap.get(id)
if cultivar == None:
cultivar = Cultivar(id, name, year)
self.IDCultivarMap[id] = cultivar
if parent1 != None:
parent1_Obj = self.IDCultivarMap.get(parent1)
if parent1_Obj == None:
row = self.idRowMap[parent1]
parent1_Obj = Cultivar(row[0], row[1], row[2])
self.IDCultivarMap[parent1] = parent1_Obj
cultivar.parent1 = parent1_Obj
if parent2 != None:
parent2_Obj = self.IDCultivarMap.get(parent2)
if parent2_Obj == None:
row = self.idRowMap[parent2]
parent2_Obj = Cultivar(row[0], row[1], row[2])
self.IDCultivarMap[parent2] = parent2_Obj
cultivar.parent2 = parent2_Obj
def calcRow(self, cultivar):
self.treeSet = Set()
queue = collections.deque()
queue.append(cultivar)
cultivar.row = 0
while len(queue) != 0:
c = queue.popleft()
self.treeSet.add(c)
if c.parent1 != None:
queue.append(c.parent1)
c.parent1.row = c.row + 1
if c.parent2 != None and c.parent2 != c.parent1:
queue.append(c.parent2)
c.parent2.row = c.row + 1
def printTree(self, cultivar):
queue = collections.deque()
queue.append(cultivar)
while len(queue) != 0:
c = queue.popleft()
if c.parent1 != None:
queue.append(c.parent1)
if c.parent2 != None:
queue.append(c.parent2)
print c.name, c.rowAmount
def printGrid(self, grid):
digit = 3
for row in grid:
newRow = []
for col in row:
col = str(col)
for i in range(digit - len(col)):
col = '0' + col
newRow.append(col)
print newRow
# calcRow should be called before this function
def calcRowAmount(self, cultivar):
queue = collections.deque()
queue.append(cultivar)
self.totalRow = 0
while len(queue) != 0:
c = queue.popleft()
biggest = 1
if c.parent1 != None:
queue.append(c.parent1)
biggest = c.parent1.row - c.row
if c.parent2 != None and c.parent2 != c.parent1:
queue.append(c.parent2)
if c.parent2.row > c.parent1.row:
biggest = c.parent2.row - c.row
c.rowAmount = biggest
tmp = c.rowAmount + c.row
if tmp > self.totalRow:
self.totalRow = tmp
def calcMaxRow(self, cultivar):
queue = collections.deque()
queue.append(cultivar)
biggest = 0
while len(queue) != 0:
c = queue.popleft()
tmp = c.row + c.rowAmount
if tmp > biggest:
biggest = tmp
biggest = c.row + c.rowAmount
if c.parent1 != None:
queue.append(c.parent1)
if c.parent2 != None:
queue.append(c.parent2)
return biggest
def calcBestCol(self, cultivar):
return Grid.middleColumn
def calcCol(self, cultivar):
self.grid = Grid(self.totalRow)
for node in self.treeSet:
node.col = self.grid.occupy(node, self.calcBestCol(node))
def generateByName(self, name):
self.generateByID(self.nameIDMap[name.strip()])
def generateByID(self, id):
cultivar = self.IDCultivarMap[id]
self.calcRow(cultivar)
self.calcRowAmount(cultivar)
self.calcCol(cultivar)
self.printGrid(self.grid.grid)
Drawer(cultivar).draw(self.outputFolder)
| lindenquan/draw-pedigree | Generator.py | Python | mit | 6,647 |
"""
sum(2 * 2**i for i in range(i)) == 2 * (2**i - 1) == n
i == log_2(n // 2 + 1)
"""
from math import ceil, log
import time
def count_ways(n, current_power=None, memo=None):
    """
    Count the ways to write ``n`` as a sum of powers of two where each
    power may be used at most twice (Project Euler 169).

    ``current_power`` is the largest exponent still available;
    ``memo`` caches results keyed by (n, current_power).
    """
    if memo is None:
        memo = {}
    if current_power is None:
        # Smallest p with 2 * (2**p - 1) >= n, i.e. the highest power
        # that could possibly be needed: p = ceil(log2(n // 2 + 1)).
        current_power = ceil(log(n // 2 + 1, 2))
    key = (n, current_power)
    if key in memo:
        return memo[key]
    term = 2 ** current_power
    # Sanity bound: with powers <= current_power, at most twice each,
    # the largest representable sum is 2 * (2**(current_power+1) - 1).
    assert n <= 2 * (2 ** (current_power + 1) - 1)
    # Largest sum representable using only powers below current_power.
    smaller_capacity = 2 * (2 ** current_power - 1)
    total = 0
    # Use the current power 2, 1, or 0 times and recurse on the rest.
    for copies in (2, 1):
        rest = n - copies * term
        if rest == 0:
            total += 1
        elif 0 < rest <= smaller_capacity:
            total += count_ways(rest, current_power - 1, memo)
    if n <= smaller_capacity:
        total += count_ways(n, current_power - 1, memo)
    memo[key] = total
    return total
# Time the full puzzle computation and report elapsed wall-clock time.
t0 = time.time()
print(count_ways(10 ** 25))
t1 = time.time()
print('Total time:', (t1 - t0) * 1000, 'ms')
| simonolander/euler | euler-169-sum-of-powers-of-2.py | Python | mit | 1,172 |
# coding=utf-8
"""Test configuration of toolbox."""
import importlib
import os
import pytest
from snntoolbox.bin.utils import update_setup
from snntoolbox.utils.utils import import_configparser
# Collect package names (version pins stripped at '==') from the
# requirements.txt two directories above this test file.
with open(os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       '..', '..', 'requirements.txt'))) as f:
    requirements = []
    for s in f.readlines():
        requirements.append(s.rstrip('\n').split('==')[0])
@pytest.mark.parametrize('required_module', requirements)
def test_imports_from_requirements(required_module):
    """Every listed requirement must be importable in this environment."""
    module = importlib.import_module(required_module)
    assert module
# Todo: Add configuration that is expected to pass.
# (config dict, expect_pass) pairs consumed by test_updating_settings
# below; both current entries describe configurations expected to fail.
_in_and_out = [
    ({}, False),
    ({'paths': {'path_wd': os.path.dirname(__file__),
                'dataset_path': os.path.dirname(__file__),
                'filename_ann': '98.96'}}, False)
]
@pytest.mark.parametrize('params, expect_pass', _in_and_out)
def test_updating_settings(params, expect_pass, _path_wd):
    """Write ``params`` to a config file and feed it to update_setup."""
    configparser = import_configparser()
    parser = configparser.ConfigParser()
    parser.read_dict(params)
    config_file = os.path.join(str(_path_wd), 'config')
    with open(config_file, 'w') as handle:
        parser.write(handle)
    if not expect_pass:
        pytest.raises(AssertionError, update_setup, config_file)
    else:
        assert update_setup(config_file)
| NeuromorphicProcessorProject/snn_toolbox | tests/core/test_config.py | Python | mit | 1,363 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: drop model 'New', create 'News'.

    NOTE(review): the old news_new table is deleted and news_news created
    empty -- no row data is copied across by this migration.
    """
    def forwards(self, orm):
        # Deleting model 'New'
        db.delete_table('news_new')
        # Removing M2M table for field projects_relateds on 'New'
        db.delete_table('news_new_projects_relateds')
        # Adding model 'News'
        db.create_table('news_news', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('summary', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('news', ['News'])
        # Adding M2M table for field projects_relateds on 'News'
        db.create_table('news_news_projects_relateds', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('news', models.ForeignKey(orm['news.news'], null=False)),
            ('project', models.ForeignKey(orm['projects.project'], null=False))
        ))
        db.create_unique('news_news_projects_relateds', ['news_id', 'project_id'])
    def backwards(self, orm):
        # Reverse of forwards(): restore 'New' and drop 'News'.
        # Adding model 'New'
        db.create_table('news_new', (
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
            ('summary', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal('news', ['New'])
        # Adding M2M table for field projects_relateds on 'New'
        db.create_table('news_new_projects_relateds', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('new', models.ForeignKey(orm['news.new'], null=False)),
            ('project', models.ForeignKey(orm['projects.project'], null=False))
        ))
        db.create_unique('news_new_projects_relateds', ['new_id', 'project_id'])
        # Deleting model 'News'
        db.delete_table('news_news')
        # Removing M2M table for field projects_relateds on 'News'
        db.delete_table('news_news_projects_relateds')
    # Frozen ORM state South uses to reconstruct models at migration time.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'news.news': {
            'Meta': {'object_name': 'News'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'projects_relateds': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'projects.project': {
            'Meta': {'object_name': 'Project'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'sponsor': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['news']
| nsi-iff/nsi_site | apps/news/migrations/0003_auto__del_new__add_news.py | Python | mit | 7,906 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ArtifactsClientConfiguration(Configuration):
    """Configuration for ArtifactsClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param endpoint: The workspace development endpoint, for example https://myworkspace.dev.azuresynapse.net.
    :type endpoint: str
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        endpoint: str,
        **kwargs: Any
    ) -> None:
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        super(ArtifactsClientConfiguration, self).__init__(**kwargs)
        self.credential = credential
        self.endpoint = endpoint
        self.api_version = "2019-06-01-preview"
        # Default OAuth scope for Synapse; callers may add more via the
        # 'credential_scopes' keyword argument.
        self.credential_scopes = ['https://dev.azuresynapse.net/.default']
        self.credential_scopes.extend(kwargs.pop('credential_scopes', []))
        kwargs.setdefault('sdk_moniker', 'synapse/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        """Install default pipeline policies, honouring any overrides in kwargs."""
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Build a bearer-token policy only when one wasn't supplied explicitly.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/synapse/azure-synapse/azure/synapse/artifacts/aio/_configuration_async.py | Python | mit | 3,080 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RunCommandDocumentBase(Model):
    """Describes the properties of a Run Command metadata.
    All parameters are required (see _validation below) and must be
    populated in order to send to Azure.
    :param schema: The VM run command schema.
    :type schema: str
    :param id: The VM run command id.
    :type id: str
    :param os_type: The Operating System type. Possible values include:
     'Windows', 'Linux'
    :type os_type: str or
     ~azure.mgmt.compute.v2017_03_30.models.OperatingSystemTypes
    :param label: The VM run command label.
    :type label: str
    :param description: The VM run command description.
    :type description: str
    """
    _validation = {
        'schema': {'required': True},
        'id': {'required': True},
        'os_type': {'required': True},
        'label': {'required': True},
        'description': {'required': True},
    }
    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'schema': {'key': '$schema', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
        'label': {'key': 'label', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }
    def __init__(self, schema, id, os_type, label, description):
        super(RunCommandDocumentBase, self).__init__()
        self.schema = schema
        self.id = id
        self.os_type = os_type
        self.label = label
        self.description = description
| AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/run_command_document_base.py | Python | mit | 1,874 |
#!/usr/bin/env python
import time
import json
import random
import re
from bottle import route, hook, response, run, static_file
# Route handlers.  The originals defined four different routes with
# functions all named ``index_css`` -- each ``def`` silently shadowed the
# previous one (bottle registers the handler at decoration time, so it
# happened to work, but it is misleading and breaks introspection).
# Each handler now has a unique, descriptive name; behaviour is unchanged.
@route('/')
def index():
    """Serve the application's landing page."""
    return static_file('index.html', root = '.')

@route('/maptweets.js')
def serve_maptweets_js():
    """Serve the client-side script."""
    return static_file('maptweets.js', root = '.')

@route('/cross.jpg')
def serve_cross_jpg():
    """Serve a static image asset."""
    return static_file('cross.jpg', root = '.')

@route('/light.png')
def serve_light_png():
    """Serve a static image asset."""
    return static_file('light.png', root = '.')

@route('/event.png')
def serve_event_png():
    """Serve a static image asset."""
    return static_file('event.png', root = '.')
# Listen on all interfaces, port 80, using the tornado server backend.
# NOTE(review): debug=True should be disabled outside development.
run(host = '0.0.0.0', port = 80, server = 'tornado', debug = True)
| relh/cathhacks | app.py | Python | mit | 626 |
# python3
import sys
class Bracket:
    """An opening bracket together with the index where it appeared."""

    def __init__(self, bracket_type, position):
        self.bracket_type = bracket_type
        self.position = position

    def Match(self, c):
        """Return True when character ``c`` closes this bracket type."""
        closers = {'[': ']', '{': '}', '(': ')'}
        return closers.get(self.bracket_type) == c
if __name__ == "__main__":
    text = sys.stdin.read()

    # Stack of unmatched opening brackets, and (in parallel) the 1-based
    # positions of currently-unmatched brackets for error reporting.
    opening_brackets_stack = []
    index = []
    # Bug fix: initialise before the loop -- the original only assigned
    # ``match`` inside the loop body, so empty input raised NameError at
    # the final check.
    match = True
    # Bug fix: loop variable renamed from ``next`` (shadowed the builtin).
    # Also restored the final print statement, which had been corrupted.
    for i, ch in enumerate(text):
        match = True
        if ch == '(' or ch == '[' or ch == '{':
            opening_brackets_stack.append(Bracket(ch, i))
            index.append(i + 1)
        if ch == ')' or ch == ']' or ch == '}':
            if len(opening_brackets_stack) == 0 or opening_brackets_stack.pop().Match(ch) == False:
                # Unmatched or mismatched closer: remember its position.
                match = False
                index.append(i + 1)
                break
            # Closer matched the most recent opener; drop its position.
            index.pop()

    # Report the offending position on mismatch or leftover openers;
    # otherwise the brackets balance.
    if match == False or len(opening_brackets_stack) > 0:
        print(index.pop())
    else:
        print("Success")
# vim:ts=4:sts=4:sw=4:expandtab
import os
import satori.web.setup
def manage():
    """Run Django's management commands with satori.web's settings."""
    from django.core.management import execute_manager
    import satori.web.settings as settings
    # HACK
    # Override find_management_module for the 'satori.web' app name,
    # presumably because Django cannot resolve the dotted package path to
    # the management directory on its own -- verify before removing.
    import django.core.management
    old_fmm = django.core.management.find_management_module
    def find_management_module(app_name):
        # Delegate to Django's original lookup for every other app.
        if app_name == 'satori.web':
            return os.path.join(os.path.dirname(__file__), 'management')
        else:
            return old_fmm(app_name)
    django.core.management.find_management_module = find_management_module
    # END OF HACK
    execute_manager(settings)
| zielmicha/satori | satori.web/satori/web/__init__.py | Python | mit | 616 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler, HTTPError
from schema import Session, Feed
from jinja2.exceptions import TemplateNotFound
class Base(RequestHandler):
    """Common handler: jinja2 template rendering plus per-request cleanup
    of the shared Session (appears to be a scoped DB session from the
    schema module -- confirm there)."""
    @property
    def env(self):
        # Jinja2 environment shared via the Application object.
        return self.application.env
    def get_error_html(self, status_code, **kwargs):
        # Try a status-specific template, then a generic 50x page, then a
        # plain-text last resort; always close the session afterwards.
        try:
            self.render('error/%s.html' % status_code)
        except TemplateNotFound:
            try:
                self.render('error/50x.html', status_code=status_code)
            except TemplateNotFound:
                self.write('epic fail')
        Session.close()
    def on_finish(self):
        # Release the session when the request finishes.
        Session.remove()
    def render(self, template, **kwds):
        """Render ``template`` with ``kwds``, the ordered feed list, and
        shared request globals; 404 when the template does not exist."""
        try:
            template = self.env.get_template(template)
        except TemplateNotFound:
            raise HTTPError(404)
        # Every template receives the feed list, ordered by title.
        kwds['feeds'] = Session.query(Feed).order_by(Feed.title)
        self.env.globals['request'] = self.request
        self.env.globals['static_url'] = self.static_url
        self.env.globals['xsrf_form_html'] = self.xsrf_form_html
        self.write(template.render(kwds))
        Session.close()
class NoDestinationHandler(Base):
    """Catch-all handler: answers every request with 404."""
    def get(self):
        raise HTTPError(404)
| mfussenegger/Huluobo | base.py | Python | mit | 1,242 |
##########################################################################
#
# Copyright 2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Generated an header, glproc.hpp, which does pretty much what GLEW does, but
covers all the functions we support.
"""
import specs.stdapi as stdapi
from dispatch import Dispatcher
from specs.glapi import glapi
from specs.glxapi import glxapi
from specs.wglapi import wglapi
from specs.cglapi import cglapi
from specs.eglapi import eglapi
from specs.glesapi import glesapi
# See http://www.opengl.org/registry/ABI/
public_symbols = set([
# GL 1.2 and ARB_multitexture
"glAccum",
"glAlphaFunc",
"glAreTexturesResident",
"glArrayElement",
"glBegin",
"glBindTexture",
"glBitmap",
"glBlendFunc",
"glCallList",
"glCallLists",
"glClear",
"glClearAccum",
"glClearColor",
"glClearDepth",
"glClearIndex",
"glClearStencil",
"glClipPlane",
"glColor3b",
"glColor3bv",
"glColor3d",
"glColor3dv",
"glColor3f",
"glColor3fv",
"glColor3i",
"glColor3iv",
"glColor3s",
"glColor3sv",
"glColor3ub",
"glColor3ubv",
"glColor3ui",
"glColor3uiv",
"glColor3us",
"glColor3usv",
"glColor4b",
"glColor4bv",
"glColor4d",
"glColor4dv",
"glColor4f",
"glColor4fv",
"glColor4i",
"glColor4iv",
"glColor4s",
"glColor4sv",
"glColor4ub",
"glColor4ubv",
"glColor4ui",
"glColor4uiv",
"glColor4us",
"glColor4usv",
"glColorMask",
"glColorMaterial",
"glColorPointer",
"glCopyPixels",
"glCopyTexImage1D",
"glCopyTexImage2D",
"glCopyTexSubImage1D",
"glCopyTexSubImage2D",
"glCullFace",
"glDeleteLists",
"glDeleteTextures",
"glDepthFunc",
"glDepthMask",
"glDepthRange",
"glDisable",
"glDisableClientState",
"glDrawArrays",
"glDrawBuffer",
"glDrawElements",
"glDrawPixels",
"glEdgeFlag",
"glEdgeFlagPointer",
"glEdgeFlagv",
"glEnable",
"glEnableClientState",
"glEnd",
"glEndList",
"glEvalCoord1d",
"glEvalCoord1dv",
"glEvalCoord1f",
"glEvalCoord1fv",
"glEvalCoord2d",
"glEvalCoord2dv",
"glEvalCoord2f",
"glEvalCoord2fv",
"glEvalMesh1",
"glEvalMesh2",
"glEvalPoint1",
"glEvalPoint2",
"glFeedbackBuffer",
"glFinish",
"glFlush",
"glFogf",
"glFogfv",
"glFogi",
"glFogiv",
"glFrontFace",
"glFrustum",
"glGenLists",
"glGenTextures",
"glGetBooleanv",
"glGetClipPlane",
"glGetDoublev",
"glGetError",
"glGetFloatv",
"glGetIntegerv",
"glGetLightfv",
"glGetLightiv",
"glGetMapdv",
"glGetMapfv",
"glGetMapiv",
"glGetMaterialfv",
"glGetMaterialiv",
"glGetPixelMapfv",
"glGetPixelMapuiv",
"glGetPixelMapusv",
"glGetPointerv",
"glGetPolygonStipple",
"glGetString",
"glGetTexEnvfv",
"glGetTexEnviv",
"glGetTexGendv",
"glGetTexGenfv",
"glGetTexGeniv",
"glGetTexImage",
"glGetTexLevelParameterfv",
"glGetTexLevelParameteriv",
"glGetTexParameterfv",
"glGetTexParameteriv",
"glHint",
"glIndexMask",
"glIndexPointer",
"glIndexd",
"glIndexdv",
"glIndexf",
"glIndexfv",
"glIndexi",
"glIndexiv",
"glIndexs",
"glIndexsv",
"glIndexub",
"glIndexubv",
"glInitNames",
"glInterleavedArrays",
"glIsEnabled",
"glIsList",
"glIsTexture",
"glLightModelf",
"glLightModelfv",
"glLightModeli",
"glLightModeliv",
"glLightf",
"glLightfv",
"glLighti",
"glLightiv",
"glLineStipple",
"glLineWidth",
"glListBase",
"glLoadIdentity",
"glLoadMatrixd",
"glLoadMatrixf",
"glLoadName",
"glLogicOp",
"glMap1d",
"glMap1f",
"glMap2d",
"glMap2f",
"glMapGrid1d",
"glMapGrid1f",
"glMapGrid2d",
"glMapGrid2f",
"glMaterialf",
"glMaterialfv",
"glMateriali",
"glMaterialiv",
"glMatrixMode",
"glMultMatrixd",
"glMultMatrixf",
"glNewList",
"glNormal3b",
"glNormal3bv",
"glNormal3d",
"glNormal3dv",
"glNormal3f",
"glNormal3fv",
"glNormal3i",
"glNormal3iv",
"glNormal3s",
"glNormal3sv",
"glNormalPointer",
"glOrtho",
"glPassThrough",
"glPixelMapfv",
"glPixelMapuiv",
"glPixelMapusv",
"glPixelStoref",
"glPixelStorei",
"glPixelTransferf",
"glPixelTransferi",
"glPixelZoom",
"glPointSize",
"glPolygonMode",
"glPolygonOffset",
"glPolygonStipple",
"glPopAttrib",
"glPopClientAttrib",
"glPopMatrix",
"glPopName",
"glPrioritizeTextures",
"glPushAttrib",
"glPushClientAttrib",
"glPushMatrix",
"glPushName",
"glRasterPos2d",
"glRasterPos2dv",
"glRasterPos2f",
"glRasterPos2fv",
"glRasterPos2i",
"glRasterPos2iv",
"glRasterPos2s",
"glRasterPos2sv",
"glRasterPos3d",
"glRasterPos3dv",
"glRasterPos3f",
"glRasterPos3fv",
"glRasterPos3i",
"glRasterPos3iv",
"glRasterPos3s",
"glRasterPos3sv",
"glRasterPos4d",
"glRasterPos4dv",
"glRasterPos4f",
"glRasterPos4fv",
"glRasterPos4i",
"glRasterPos4iv",
"glRasterPos4s",
"glRasterPos4sv",
"glReadBuffer",
"glReadPixels",
"glRectd",
"glRectdv",
"glRectf",
"glRectfv",
"glRecti",
"glRectiv",
"glRects",
"glRectsv",
"glRenderMode",
"glRotated",
"glRotatef",
"glScaled",
"glScalef",
"glScissor",
"glSelectBuffer",
"glShadeModel",
"glStencilFunc",
"glStencilMask",
"glStencilOp",
"glTexCoord1d",
"glTexCoord1dv",
"glTexCoord1f",
"glTexCoord1fv",
"glTexCoord1i",
"glTexCoord1iv",
"glTexCoord1s",
"glTexCoord1sv",
"glTexCoord2d",
"glTexCoord2dv",
"glTexCoord2f",
"glTexCoord2fv",
"glTexCoord2i",
"glTexCoord2iv",
"glTexCoord2s",
"glTexCoord2sv",
"glTexCoord3d",
"glTexCoord3dv",
"glTexCoord3f",
"glTexCoord3fv",
"glTexCoord3i",
"glTexCoord3iv",
"glTexCoord3s",
"glTexCoord3sv",
"glTexCoord4d",
"glTexCoord4dv",
"glTexCoord4f",
"glTexCoord4fv",
"glTexCoord4i",
"glTexCoord4iv",
"glTexCoord4s",
"glTexCoord4sv",
"glTexCoordPointer",
"glTexEnvf",
"glTexEnvfv",
"glTexEnvi",
"glTexEnviv",
"glTexGend",
"glTexGendv",
"glTexGenf",
"glTexGenfv",
"glTexGeni",
"glTexGeniv",
"glTexImage1D",
"glTexImage2D",
"glTexParameterf",
"glTexParameterfv",
"glTexParameteri",
"glTexParameteriv",
"glTexSubImage1D",
"glTexSubImage2D",
"glTranslated",
"glTranslatef",
"glVertex2d",
"glVertex2dv",
"glVertex2f",
"glVertex2fv",
"glVertex2i",
"glVertex2iv",
"glVertex2s",
"glVertex2sv",
"glVertex3d",
"glVertex3dv",
"glVertex3f",
"glVertex3fv",
"glVertex3i",
"glVertex3iv",
"glVertex3s",
"glVertex3sv",
"glVertex4d",
"glVertex4dv",
"glVertex4f",
"glVertex4fv",
"glVertex4i",
"glVertex4iv",
"glVertex4s",
"glVertex4sv",
"glVertexPointer",
"glViewport",
# GLX 1.3 and GLX_ARB_get_proc_address
"glXChooseVisual",
"glXCreateContext",
"glXDestroyContext",
"glXMakeCurrent",
"glXCopyContext",
"glXSwapBuffers",
"glXCreateGLXPixmap",
"glXDestroyGLXPixmap",
"glXQueryExtension",
"glXQueryVersion",
"glXIsDirect",
"glXGetConfig",
"glXGetCurrentContext",
"glXGetCurrentDrawable",
"glXWaitGL",
"glXWaitX",
"glXUseXFont",
"glXQueryExtensionsString",
"glXQueryServerString",
"glXGetClientString",
"glXGetCurrentDisplay",
"glXChooseFBConfig",
"glXGetFBConfigAttrib",
"glXGetFBConfigs",
"glXGetVisualFromFBConfig",
"glXCreateWindow",
"glXDestroyWindow",
"glXCreatePixmap",
"glXDestroyPixmap",
"glXCreatePbuffer",
"glXDestroyPbuffer",
"glXQueryDrawable",
"glXCreateNewContext",
"glXMakeContextCurrent",
"glXGetCurrentReadDrawable",
"glXQueryContext",
"glXSelectEvent",
"glXGetSelectedEvent",
"glXGetProcAddressARB",
"glXGetProcAddress",
# WGL
#"glDebugEntry",
"wglChoosePixelFormat",
"wglCopyContext",
"wglCreateContext",
"wglCreateLayerContext",
"wglDeleteContext",
"wglDescribeLayerPlane",
"wglDescribePixelFormat",
"wglGetCurrentContext",
"wglGetCurrentDC",
"wglGetDefaultProcAddress",
"wglGetLayerPaletteEntries",
"wglGetPixelFormat",
"wglGetProcAddress",
"wglMakeCurrent",
"wglRealizeLayerPalette",
"wglSetLayerPaletteEntries",
"wglSetPixelFormat",
"wglShareLists",
"wglSwapBuffers",
"wglSwapLayerBuffers",
"wglSwapMultipleBuffers",
"wglUseFontBitmapsA",
"wglUseFontBitmapsW",
"wglUseFontOutlinesA",
"wglUseFontOutlinesW",
])
# EGL 1.4
# Register the EGL 1.4 entry points as publicly exported symbols, in
# addition to the GL/GLX/WGL symbols registered above.
public_symbols.update([
    "eglBindAPI",
    "eglBindTexImage",
    "eglChooseConfig",
    "eglCopyBuffers",
    "eglCreateContext",
    "eglCreatePbufferFromClientBuffer",
    "eglCreatePbufferSurface",
    "eglCreatePixmapSurface",
    "eglCreateWindowSurface",
    "eglDestroyContext",
    "eglDestroySurface",
    "eglGetConfigAttrib",
    "eglGetConfigs",
    "eglGetCurrentContext",
    "eglGetCurrentDisplay",
    "eglGetCurrentSurface",
    "eglGetDisplay",
    "eglGetError",
    "eglGetProcAddress",
    "eglInitialize",
    "eglMakeCurrent",
    "eglQueryAPI",
    "eglQueryContext",
    "eglQueryString",
    "eglQuerySurface",
    "eglReleaseTexImage",
    "eglReleaseThread",
    "eglSurfaceAttrib",
    "eglSwapBuffers",
    "eglSwapInterval",
    "eglTerminate",
    "eglWaitClient",
    "eglWaitGL",
    "eglWaitNative",
])
class GlDispatcher(Dispatcher):
    """Code generator for the GL dispatch layer (Python 2 source file).

    Specializes the generic Dispatcher with the C declarations and the
    symbol-visibility rule used when emitting glproc.hpp.
    """
    def header(self):
        # Emit the declarations the generated dispatch code relies on:
        # the handle of the real GL library and the two symbol resolvers.
        print '''
#if defined(_WIN32)
extern HINSTANCE __libGlHandle;
#else
extern void * __libGlHandle;
#endif
void * __getPublicProcAddress(const char *procName);
void * __getPrivateProcAddress(const char *procName);
'''
    def is_public_function(self, function):
        # Public symbols are resolved directly; everything else goes through
        # the private (GetProcAddress-style) resolver. All CGL entry points
        # are treated as public.
        return function.name in public_symbols or function.name.startswith('CGL')
if __name__ == '__main__':
    # Emit glproc.hpp on stdout: an include-guarded header that dispatches
    # one windowing API (EGL / WGL / CGL / GLX, selected at compile time)
    # plus the GL and GLES APIs.
    print
    print '#ifndef _GLPROC_HPP_'
    print '#define _GLPROC_HPP_'
    print
    print '#include "glimports.hpp"'
    print '#include "os.hpp"'
    print
    print
    dispatcher = GlDispatcher()
    dispatcher.header()
    # Window-system binding APIs are mutually exclusive per platform.
    print '#if defined(TRACE_EGL)'
    print
    dispatcher.dispatch_api(eglapi)
    print '#elif defined(_WIN32)'
    print
    dispatcher.dispatch_api(wglapi)
    print '#elif defined(__APPLE__)'
    dispatcher.dispatch_api(cglapi)
    print '#else'
    print
    dispatcher.dispatch_api(glxapi)
    print '#endif'
    print
    dispatcher.dispatch_api(glapi)
    print
    dispatcher.dispatch_api(glesapi)
    print
    print '#endif /* !_GLPROC_HPP_ */'
    print
| xranby/apitrace | glproc.py | Python | mit | 12,065 |
import unicodedata
class SanitiseText:
    """Downgrade unicode text to a restricted character set.

    Subclasses define ``ALLOWED_CHARACTERS``; anything outside that set is
    either downgraded to a sensible equivalent (diacritics stripped, smart
    punctuation straightened, zero-width characters dropped) or replaced
    with ``?``.
    """

    # Characters allowed to pass through unchanged; overridden by subclasses.
    ALLOWED_CHARACTERS = set()

    # Explicit downgrades for characters that have no useful unicode
    # decomposition (unicodedata.decomposition returns '' for these).
    REPLACEMENT_CHARACTERS = {
        '–': '-',  # EN DASH (U+2013)
        '—': '-',  # EM DASH (U+2014)
        '…': '...',  # HORIZONTAL ELLIPSIS (U+2026)
        '‘': '\'',  # LEFT SINGLE QUOTATION MARK (U+2018)
        '’': '\'',  # RIGHT SINGLE QUOTATION MARK (U+2019)
        '“': '"',  # LEFT DOUBLE QUOTATION MARK (U+201C)
        '”': '"',  # RIGHT DOUBLE QUOTATION MARK (U+201D)
        '\u180E': '',  # MONGOLIAN VOWEL SEPARATOR
        '\u200B': '',  # ZERO WIDTH SPACE
        '\u200C': '',  # ZERO WIDTH NON-JOINER
        '\u200D': '',  # ZERO WIDTH JOINER
        '\u2060': '',  # WORD JOINER
        '\uFEFF': '',  # ZERO WIDTH NO-BREAK SPACE
        '\u00A0': ' ',  # NO-BREAK SPACE (U+00A0)
        '\t': ' ',  # TAB
    }

    @classmethod
    def encode(cls, content):
        """Return *content* with every character made compatible."""
        return ''.join(cls.encode_char(char) for char in content)

    @classmethod
    def get_non_compatible_characters(cls, content):
        """
        Given an input string, return a set of non compatible characters.

        This follows the same rules as `cls.encode`, but returns just the
        characters that encode would replace with `?`.
        """
        return {
            c for c in content
            if c not in cls.ALLOWED_CHARACTERS and cls.downgrade_character(c) is None
        }

    @staticmethod
    def get_unicode_char_from_codepoint(codepoint):
        """
        Given a unicode codepoint (eg 002E for '.', 0061 for 'a', etc),
        return that actual unicode character.

        Raises ValueError for anything that is not exactly four hex digits,
        the format unicodedata.decomposition produces.
        """
        if not set(codepoint) <= set('0123456789ABCDEF') or not len(codepoint) == 4:
            raise ValueError('{} is not a valid unicode codepoint'.format(codepoint))
        # chr(int(..., 16)) replaces the previous eval()-based construction;
        # it yields the identical character for every valid codepoint,
        # without evaluating constructed source code.
        return chr(int(codepoint, 16))

    @classmethod
    def downgrade_character(cls, c):
        """
        Attempt to downgrade a non-compatible character to the allowed
        character set. May downgrade to multiple characters, eg `… -> ...`.

        Returns None if the character is either already valid or has no
        known downgrade.
        """
        decomposed = unicodedata.decomposition(c)
        if decomposed != '' and '<' not in decomposed:
            # decomposition lists the code points a composite character is
            # made of: 'á' gives '0061 0301' (letter a + combining acute).
            # Entries containing a '<...>' tag are compatibility mappings
            # whose downgrade is unclear, so they fall through to the
            # REPLACEMENT_CHARACTERS lookup below.
            # ASSUMPTION: the first code point of a combined character is
            # the base (ascii) character.
            return cls.get_unicode_char_from_codepoint(decomposed.split()[0])
        else:
            # No decomposition: try an explicit mapping (eg en dash -> '-'),
            # else return None.
            return cls.REPLACEMENT_CHARACTERS.get(c)

    @classmethod
    def encode_char(cls, c):
        """
        Given a single unicode character, return a compatible character
        from the allowed set ('?' when no downgrade exists).
        """
        if c in cls.ALLOWED_CHARACTERS:
            return c
        else:
            c = cls.downgrade_character(c)
            return c if c is not None else '?'
class SanitiseSMS(SanitiseText):
    """
    Given an input string, makes it GSM and Welsh character compatible. This
    involves removing all non-gsm characters by applying the following rules:

    * characters within the GSM character set
      (https://en.wikipedia.org/wiki/GSM_03.38) and extension character set
      are kept
    * Welsh characters not included in the default GSM character set are kept
    * characters with sensible downgrades are replaced in place
      * characters with diacritics (accents, umlauts, cedillas etc) are
        replaced with their base character, eg é -> e
      * en dash and em dash (– and —) are replaced with hyphen (-)
      * left/right quotation marks (‘, ’, “, ”) are replaced with ' and "
      * zero width spaces (sometimes used to stop eg "gov.uk" linkifying)
        are removed
      * tabs are replaced with a single space
    * any remaining unicode characters (eg chinese/cyrillic/glyphs/emoji)
      are replaced with ?
    """
    # Accented vowels (plus w/y) used in Welsh orthography, upper and lower
    # case, one string per diacritic family (implicit literal concatenation).
    WELSH_DIACRITICS = set(
        'àèìòùẁỳ' 'ÀÈÌÒÙẀỲ'  # grave
        'áéíóúẃý' 'ÁÉÍÓÚẂÝ'  # acute
        'äëïöüẅÿ' 'ÄËÏÖÜẄŸ'  # diaeresis
        'âêîôûŵŷ' 'ÂÊÎÔÛŴŶ'  # carets
    )
    # The GSM 03.38 extension table characters.
    EXTENDED_GSM_CHARACTERS = set('^{}\\[~]|€')
    # The default GSM 03.38 alphabet, combined with the extension table.
    GSM_CHARACTERS = set(
        '@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x1bÆæßÉ !"#¤%&\'()*+,-./0123456789:;<=>?' +
        '¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà'
    ) | EXTENDED_GSM_CHARACTERS
    ALLOWED_CHARACTERS = GSM_CHARACTERS | WELSH_DIACRITICS
    # some welsh characters are in GSM and some aren't - we need to distinguish between these for counting fragments
    WELSH_NON_GSM_CHARACTERS = WELSH_DIACRITICS - GSM_CHARACTERS
class SanitiseASCII(SanitiseText):
    """
    As SMS above, but the allowed characters are printable ascii, from
    character range 32 (space) to 126 ('~') inclusive.
    """

    # Exactly the 95 printable ASCII characters.
    ALLOWED_CHARACTERS = {chr(code) for code in range(32, 127)}
| alphagov/notifications-utils | notifications_utils/sanitise_text.py | Python | mit | 5,965 |
"""Defines all the classes needed to create a Report from scratch.
Report: a single document about the state of a cafe.
Category: a class of products sharing common characteristics.
Product: a single item in a cafe.
Unit: a measure of products.
FullProduct: a product with its quantity.
"""
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Report(models.Model):
    """Stores a single report created from selected FullProducts.
    Date of creation is set automatically.
    Currently logged in user is assigned to report as creator.
    """
    created_on = models.DateTimeField(auto_now_add=True)  # set once, on insert
    updated_on = models.DateTimeField(auto_now=True)  # refreshed on every save
    # Employee who created the report; optional (may be None).
    creator = models.ForeignKey(
        'employees.Employee',
        null=True,
        blank=True,
        default=None
    )
    # Cafe the report belongs to; required on forms (blank=False) but the
    # column itself is nullable.
    caffe = models.ForeignKey(
        'caffe.Caffe',
        null=True,
        blank=False,
        default=None
    )
    class Meta:
        ordering = ('-created_on',)  # newest reports first
        default_permissions = ('add', 'change', 'delete', 'view')
    def save(self, *args, **kwargs):
        """Save model into the database.

        Enforces that the report's cafe matches its creator's cafe, then
        runs full model validation before persisting.
        """
        if self.creator is not None:
            if self.caffe != self.creator.caffe:
                raise ValidationError(
                    _('Kawiarnia i kawiarnia tworzącego powinna się zgadzać')
                )
        self.full_clean()
        super(Report, self).save(*args, **kwargs)
    def __str__(self):
        return 'Report created: {:%Y-%m-%d %H:%M} {}'.format(
            self.created_on,
            self.creator
        )
class Category(models.Model):
    """Stores the category of a product, e.g. cake, tea, sandwich.
    Intended to be created once and then to reuse it in future reports.
    """
    name = models.CharField(max_length=100)
    # Owning cafe; category names are unique per cafe (see unique_together).
    caffe = models.ForeignKey(
        'caffe.Caffe',
        null=True,
        blank=False,
        default=None
    )
    class Meta:
        ordering = ('name',)
        unique_together = ('name', 'caffe',)
        default_permissions = ('add', 'change', 'delete', 'view')
    def save(self, *args, **kwargs):
        """Save model into the database (after full validation)."""
        self.full_clean()
        super(Category, self).save(*args, **kwargs)
    def __str__(self):
        return '{}'.format(self.name)
class Unit(models.Model):
    """Stores a type of unit used to count the amount of products.
    Intended to be created once and then to reuse it in future reports.
    """
    name = models.CharField(max_length=100)
    # Owning cafe; unit names are unique per cafe (see unique_together).
    caffe = models.ForeignKey(
        'caffe.Caffe',
        null=True,
        blank=False,
        default=None
    )
    class Meta:
        ordering = ('name',)
        unique_together = ('name', 'caffe',)
        default_permissions = ('add', 'change', 'delete', 'view')
    def save(self, *args, **kwargs):
        """Save model into the database (after full validation)."""
        self.full_clean()
        super(Unit, self).save(*args, **kwargs)
    def __str__(self):
        return '{}'.format(self.name)
class Product(models.Model):
    """Stores a specific product, e.g. brownie, earl grey, PB&J sandwich.
    Intended to be created once and then to reuse it in future reports.
    Unit specifies how the amount of product is counted.
    """
    name = models.CharField(max_length=100)
    category = models.ForeignKey('Category', on_delete=models.CASCADE)
    unit = models.ForeignKey('Unit', on_delete=models.CASCADE)
    caffe = models.ForeignKey(
        'caffe.Caffe',
        null=True,
        blank=False,
        default=None
    )
    class Meta:
        ordering = ('name',)
        unique_together = ('name', 'caffe',)
        default_permissions = ('add', 'change', 'delete', 'view')
    def save(self, *args, **kwargs):
        """Save model into the database.

        Enforces that the product's cafe matches the cafe of both its
        category and its unit, then runs full validation before persisting.
        """
        if self.caffe != self.category.caffe:
            raise ValidationError(
                _('Kawiarnia i kawiarnia kategorii nie zgadza się.')
            )
        if self.caffe != self.unit.caffe:
            raise ValidationError(
                _('Kawiarnia i kawiarnia jednostki nie zgadza się.')
            )
        self.full_clean()
        super(Product, self).save(*args, **kwargs)
    def __str__(self):
        return '{}'.format(self.name)
class FullProduct(models.Model):
    """Stores a product with its quantity.
    Intended to be used once, only in one report.
    """
    product = models.ForeignKey('Product')
    amount = models.FloatField(validators=[MinValueValidator(0)])
    report = models.ForeignKey(
        'Report',
        blank=True,
        null=True,
        related_name='full_products'
    )
    caffe = models.ForeignKey(
        'caffe.Caffe',
        null=True,
        blank=False,
        default=None
    )
    def clean(self, *args, **kwargs):
        """Clean data and check validation.

        Rejects a report containing the same product twice. The instance's
        own persisted row is excluded from the comparison; previously the
        instance matched itself when updating an already-saved FullProduct,
        so every re-save raised ValidationError.
        """
        if self.report is not None:
            for full_product in self.report.full_products.all():
                # Skip this instance's own database row (bug fix).
                if full_product.pk == self.pk:
                    continue
                if full_product.product == self.product:
                    raise ValidationError(
                        _('Report should not contain two same products.')
                    )
        super(FullProduct, self).clean(*args, **kwargs)
    def save(self, *args, **kwargs):
        """Save model into the database.

        Enforces that the cafe matches both the report's and the product's
        cafe, then runs full validation (which calls clean()).
        """
        if self.report:
            if self.caffe != self.report.caffe:
                raise ValidationError(
                    _('Kawiarnia i kawiarnia raportu nie zgadza się.')
                )
        if self.caffe != self.product.caffe:
            raise ValidationError(
                _('Kawiarnia i kawiarnia produktu nie zgadza się.')
            )
        self.full_clean()
        super(FullProduct, self).save(*args, **kwargs)
    def __str__(self):
        return '{0}, {1:g} {2}'.format(
            self.product,
            self.amount,
            self.product.unit
        )
| VirrageS/io-kawiarnie | caffe/reports/models.py | Python | mit | 6,128 |
"""
WSGI config for sitefinder_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
# The settings module must be configured before any Django machinery is
# imported; setdefault lets an externally-set DJANGO_SETTINGS_MODULE win.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sitefinder_project.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Wrap werkzeug debugger if DEBUG is on
from django.conf import settings
if settings.DEBUG:
    try:
        import django.views.debug
        import six
        from werkzeug.debug import DebuggedApplication
        # Monkey-patch Django's technical 500 page to re-raise the original
        # exception, so werkzeug's interactive debugger handles it instead.
        def null_technical_500_response(request, exc_type, exc_value, tb):
            six.reraise(exc_type, exc_value, tb)
        django.views.debug.technical_500_response = null_technical_500_response
        application = DebuggedApplication(application, evalex=True)
    except ImportError:
        # werkzeug/six not installed: deliberately fall back to the plain
        # WSGI application rather than failing at import time.
        pass
| robjordan/sitefinder | src/sitefinder_project/wsgi.py | Python | mit | 942 |
from transmute_core import *
# from .handler import convert_to_handler
# from .route import route
from .route_set import RouteSet
from .url import url_spec
from .swagger import add_swagger
| toumorokoshi/tornado-transmute | tornado_transmute/__init__.py | Python | mit | 189 |
# -*- coding: utf-8 -*-
import re
import json
import traceback
import sys
import time
import datetime
import random
# Force the process-wide default encoding to UTF-8 so Chinese text does not
# raise UnicodeDecodeError (Python 2 only: reload(sys) re-exposes the
# setdefaultencoding hook that site.py normally removes).
# NOTE(review): this is a global hack that can mask real encoding bugs
# elsewhere in the process.
reload(sys)
sys.setdefaultencoding("utf8")
from datetime import date
from scrapy.selector import Selector
from dateutil.relativedelta import relativedelta
if __name__ == '__main__':
import sys
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from base_crawler import BaseCrawler
from crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
class Crawler(BaseCrawler):
    """China Telecom crawler for Inner Mongolia (nm.189.cn).

    kwargs contains:
        'tel': str,
        'pin_pwd': str,
        'id_card': str,
        'full_name': unicode,
        'sms_code': str,
        'captcha_code': str

    Error levels (first element of the returned tuples):
        0: success
        1: wrong account / password
        2: wrong verification code
        9: other error
    """
    def __init__(self, **kwargs):
        """Initialise crawler state on top of BaseCrawler.

        info_res caches the personal-info page HTML fetched by login();
        pin_pwd_error_times is a password-failure counter (not updated
        anywhere in this file).
        """
        super(Crawler, self).__init__(**kwargs)
        self.pin_pwd_error_times = 0
        self.info_res = ''
def need_parameters(self, **kwargs):
return ['pin_pwd']
def get_verify_type(self, **kwargs):
return 'SMS'
    def login(self, **kwargs):
        """Log in through the unified China Telecom portal, then fetch and
        cache the personal-info page for later use by crawl_info().

        Returns (code, key): 0/'success' on success, error level otherwise.
        """
        # Province id for Inner Mongolia in the unified login helper.
        ProvinceID = '07'
        code, key = login_unity(self, ProvinceID, **kwargs)
        if code != 0:
            return code, key
        # Exchange the SSO login for provincial self-service cookies.
        cookie_url = 'http://nm.189.cn/selfservice/service/userLogin'
        cookie_data = {
            "number" : kwargs['tel'],
            "intLoginType":"4",
            "areaCode":"0471",
            "isBusinessCustType":"N",
            "identifyType":"B",
            "userLoginType":"4",
            "password":"",
            "randomPass":"",
            "noCheck":"N",
            "isSSOLogin":"Y",
            "sRand":"SSOLogin"
        }
        code, key, resp = self.post(cookie_url, data=json.dumps(cookie_data))
        if code != 0:
            return code, key
        personal_info_url = 'http://www.189.cn/dqmh/userCenter/userInfo.do?method=editUserInfo_new&fastcode=10000557&cityCode=nm'
        for retry in xrange(self.max_retry):
            code, key, tel_info_res = self.get(personal_info_url)
            if code != 0:
                return code, key
            # The marker below ("real name") only appears when the personal
            # info page was served for an authenticated session.
            if u'真实姓名' in tel_info_res.text:
                self.info_res = tel_info_res.text
                return 0, "success"
            else:
                pass
        else:
            # for/else: executed only when all retries were exhausted.
            self.log('crawler', "request_error", tel_info_res)
            return 9, "website_busy_error"
    def send_verify_request(self, **kwargs):
        """
        Request that a verification SMS be sent (this site uses SMS only;
        no captcha image is downloaded).
        return
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
            image_str: str, base64 captcha image; always empty for SMS
        """
        send_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMS"
        send_sms_data = {
            "phone": kwargs['tel']
        }
        code, key, resp = self.post(send_sms_url, data=json.dumps(send_sms_data))
        if code != 0:
            return code, key, ""
        if resp.text:
            try:
                resp_json_response = resp.json()
            except:
                error = traceback.format_exc()
                self.log('crawler', "Not json file : {}, resp:{}".format(error, resp.history), resp)
                return 9, 'website_busy_error', ''
            # flag '0' means the SMS was sent; '2' means sending failed.
            if resp_json_response.get('flag', '') == '0':
                return 0, "success", ""
            elif resp_json_response.get('flag', '') == '2':
                self.log('crawler', "send_sms_error", resp)
                return 9, "send_sms_error", ''
            else:
                self.log('crawler', "unknown_error", resp)
                return 9, "unknown_error", ''
        else:
            # Empty body: treat as a failed send.
            self.log('crawler', "send_sms_error", resp)
            return 9, "send_sms_error", ''
    def verify(self, **kwargs):
        """
        Submit the second-factor SMS verification code.
        return
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
        """
        check_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMSCheck"
        check_sms_data = {
            'code': kwargs['sms_code']
        }
        code, key, resp = self.post(check_sms_url, data=json.dumps(check_sms_data))
        if code != 0:
            return code, key
        try:
            resp_json_response = resp.json()
        except:
            error = traceback.format_exc()
            self.log('crawler', "json_error : %s" % error, resp)
            return 9, 'json_error'
        # flag '0' means the code was rejected.
        if resp_json_response.get('flag', '') == '0':
            self.log('crawler', "verify_error", resp)
            return 2, "verify_error"
        # If the response already contains call-log data, treat it as success.
        elif resp_json_response.get('flag', '') == '1' or 'resultNum' in resp.text or 'items' in resp.text:
            return 0, "success"
        else:
            self.log('crawler', "unknown_error", resp)
            return 9, "unknown_error"
    def crawl_info(self, **kwargs):
        """
        Scrape the account-holder information from the personal-info page
        cached in self.info_res during login().
        return
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
            info: dict, account information (see account-info format)
        """
        user_info = {}
        selector = Selector(text=self.info_res)
        try:
            full_name = selector.xpath('//input[@name="realName"]/@value').extract()
            user_info['full_name'] = full_name[0] if full_name else ''
            id_card = selector.xpath('//input[@name="certificateNumber"]/@value').extract()
            user_info['id_card'] = id_card[0] if id_card else ''
            # Address lives in a <textarea id="address">; extracted by regex
            # because its value is element text, not an attribute.
            address = re.findall(u'id="address".*?;">(.*?)</textarea>', self.info_res)
            user_info['address'] = address[0] if address else ''
            # The page exposes no account-opening date.
            user_info['open_date'] = ""
            user_info['is_realname_register'] = True
        except:
            error = traceback.format_exc()
            self.log('crawler', "html_error : %s" % error, '')
            return 9, "html_error", {}
        return 0, "success", user_info
def random_sleep(self, tm, modulus=3):
time.sleep(random.uniform(tm / modulus / 1.5, 1.5 * tm / modulus))
    def crawl_call_log(self, **kwargs):
        """
        Crawl the itemised call log for the last six months.
        return
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
            call_log: list, call records (see call-log format)
            missing_list: months that failed to fetch/parse
            pos_missing: months the site reported as having no data
        """
        call_log = []
        crawl_num = 0
        call_log_url = "http://nm.189.cn/selfservice/bill/xdQuery"
        today = date.today()
        missing_list = []
        pos_missing = []
        # Month offsets 0..-5 relative to the current month.
        search_month = [x for x in range(0, -6, -1)]
        for each_month in search_month:
            query_date = today + relativedelta(months=each_month)
            # NOTE: rebinds the loop source name; safe because the iterator
            # was captured when the for-loop started.
            search_month = "%d%02d" % (query_date.year, query_date.month)
            call_log_data = {
                "billingCycle": "{}{}".format(query_date.year, str(query_date.month).zfill(2)),
                'accNbr': kwargs['tel'],
                'accNbrType': '4',
                'areaCode': '0478',
                'pageNo': -1,
                'pageRecords': -1,
                'prodSpecId': '378',
                'qtype': '0',
                'isYWlQuery': 'N',
            }
            header = {
                'Referer': 'http://nm.189.cn/selfservice/bill/xd',
                'Host': 'nm.189.cn',
                'Content-Type': 'application/json'
            }
            # Retry loop: up to max_retry immediate attempts, then keep
            # retrying with random sleeps until the 10-second budget expires.
            start_time = time.time()
            end_time = start_time + 10
            aid_time_dict = dict()
            retry_times = self.max_retry
            log_for_retry = []
            while 1:
                log_for_retry.append((1, retry_times))
                retry_times -= 1
                code, key, resp = self.post(call_log_url, data=json.dumps(call_log_data), headers=header)
                if code:
                    missing_flag = True
                elif 'POR-2102' in resp.text:
                    # POR-2102: no query results, this month has no data.
                    missing_flag = False
                else:
                    flag = True
                    break
                now_time = time.time()
                if retry_times >= 0:
                    aid_time_dict.update({retry_times: time.time()})
                elif now_time < end_time:
                    loop_time = aid_time_dict.get(0, time.time())
                    left_time = end_time - loop_time
                    self.random_sleep(left_time)
                else:
                    flag = False
                    # Fetch failures go to missing_list; "no data" months go
                    # to pos_missing.
                    if missing_flag:
                        missing_list.append(search_month)
                    else:
                        pos_missing.append(search_month)
                    break
            self.log('crawler', '{}重试记录{}'.format(search_month, log_for_retry), '')
            if not flag:
                continue
            # Superseded retry loop kept for reference:
            # for retry in range(self.max_retry):
            #     code, key, resp = self.post(call_log_url, data=json.dumps(call_log_data), headers=header)
            #     if code != 0:
            #         missing_flag = True
            #     # no query results, this month has no data
            #     elif 'POR-2102' in resp.text:
            #         missing_flag = False
            #     else:
            #         break
            # else:
            #     if missing_flag:
            #         missing_list.append(search_month)
            #     else:
            #         self.log('crawler', '未查询到您的详单信息', resp)
            #         pos_missing.append(search_month)
            #     continue
            try:
                resp_json_response = resp.json()
            except:
                error = traceback.format_exc()
                self.log('crawler', 'html_error : %s' % error, resp)
                missing_list.append(search_month)
                continue
            if resp_json_response.get('resultCode', '') == 'POR-0000':
                status_key, status_level, message, log_data = self.call_log_get(resp.text, search_month)
                if status_level != 0:
                    # Parsed page but extraction failed: count as crawl error.
                    crawl_num += 1
                    self.log('crawler', message, resp)
                    missing_list.append(search_month)
                    continue
                else:
                    call_log.extend(log_data)
            else:
                self.log('crawler', 'html_error', resp)
                missing_list.append(search_month)
        if crawl_num > 0:
            return 9, 'crawl_error', call_log, missing_list, pos_missing
        if len(missing_list+pos_missing) == 6:
            # Every month failed or was empty: treat as a site-wide problem.
            return 9, 'website_busy_error', call_log, missing_list, pos_missing
        return 0, "success", call_log, missing_list, pos_missing
    def call_log_get(self, response, search_month):
        """Parse one month's call-log JSON into normalised records.
        Fields produced per record:
        | `call_cost`     | string | billed cost                        |
        | `call_time`     | string | call start time (unix timestamp)   |
        | `call_method`   | string | call direction (caller / callee)   |
        | `call_type`     | string | call type (local, long-distance)   |
        | `call_from`     | string | this phone's calling location      |
        | `call_to`       | string | remote party's region              |
        | `call_duration` | string | duration in seconds                |
        """
        try:
            json_logs = json.loads(response)
        except:
            error = traceback.format_exc()
            return 'json_error', 9, 'json_error %s' % error, []
        if json_logs.get('resultCode', '') == 'POR-0000':
            records = []
            for item in json_logs.get('items', []):
                data = {}
                try:
                    data['month'] = search_month
                    data['call_cost'] = item.get('fee', '')
                    # Convert "date time" into a unix timestamp: pull out the
                    # two-digit groups and rebuild 'YYYY-MM-DD HH:MM:SS'.
                    temp = '{} {}'.format(item.get('converseDate', ''), item.get('converseTime', ''))
                    call_time = re.findall('\d{2}', temp)
                    call_time_change = call_time[0] + call_time[1] + '-' + call_time[2] + '-' + call_time[3] + ' ' + \
                                       call_time[4] + ':' + call_time[5] + ':' + call_time[6]
                    timeArray = time.strptime(call_time_change, "%Y-%m-%d %H:%M:%S")
                    call_time_timeStamp = str(int(time.mktime(timeArray)))
                    data['call_time'] = call_time_timeStamp
                    data['call_method'] = item.get('callType', '')
                    data['call_type'] = item.get('converseType', '')
                    # data['call_from'] = item.get('converseAddr', '')
                    raw_call_from = item.get('converseAddr', '').strip()
                    # Normalise the location via formatarea; fall back to the
                    # raw string when it cannot be normalised.
                    call_from, error = self.formatarea(raw_call_from)
                    if call_from:
                        data['call_from'] = call_from
                    else:
                        # self.log("crawler", "{} {}".format(error, raw_call_from), "")
                        data['call_from'] = raw_call_from
                    data['call_to'] = item.get('callArea', '')
                    data['call_tel'] = item.get('callingNbr', '')
                    # Duration arrives as H'M'S; convert to total seconds.
                    durations = item.get('converseDuration', '').split("'")
                    duration = int(durations[0]) * 3600 + int(durations[1]) * 60 + int(durations[2])
                    data['call_duration'] = str(duration)
                    records.append(data)
                except:
                    error = traceback.format_exc()
                    return 'html_error', 9, 'html_error %s' % error, []
            return 'success', 0, 'success', records
        else:
            return 'html_error', 9, 'html_error', []
    def crawl_phone_bill(self, **kwargs):
        """Crawl monthly bill summaries for the previous six months.

        Returns (level, message, phone_bill, missing_list) where phone_bill
        is a list of per-month dicts produced by phone_bill_get().
        """
        phone_bill = list()
        missing_list = []
        month_bill_url = 'http://nm.189.cn/selfservice/bill/khzdQuery'
        header = {
            'Referer': 'http://nm.189.cn/selfservice/bill/khzd-mini?fastcode=10000542&cityCode=nm',
            'Host': 'nm.189.cn',
            'Content-Type': 'application/json'
        }
        for month in self.__monthly_period(6, '%Y%m'):
            post_data = {
                'accNbr': kwargs['tel'],
                'accNbrType': '4',
                'areaCode': '0478',
                'billingCycle': month,
                'prodSpecId': '378',
                'prodSpecName': '',
                'smsCode': '',
            }
            for retry in xrange(self.max_retry):
                code, key, resp = self.post(month_bill_url, headers=header, data=json.dumps(post_data))
                if code != 0:
                    continue
                else:
                    break
            else:
                # All retries failed for this month.
                missing_list.append(month)
                continue
            key, level, message, result = self.phone_bill_get(month, resp)
            # Months with no amount (or a zero bill) are treated as missing.
            if level != 0 or result['bill_amount'] == '' or result['bill_amount'] == '0.00':
                missing_list.append(month)
                continue
            phone_bill.append(result)
        if len(missing_list) == 6:
            return 9, 'website_busy_error', phone_bill, missing_list
        # The current (unbilled) month is expected to be absent; don't report
        # it as missing.
        today = date.today()
        today_month = "%d%02d" % (today.year, today.month)
        if today_month in missing_list:
            missing_list.remove(today_month)
        return 0, 'success', phone_bill, missing_list
def phone_bill_get(self, month, resp):
month_bill = {
'bill_month': month,
'bill_amount': '',
'bill_package': '',
'bill_ext_calls': '',
'bill_ext_data': '',
'bill_ext_sms': '',
'bill_zengzhifei': '',
'bill_daishoufei': '',
'bill_qita': ''
}
try:
bill = json.loads(resp.text)
except:
error = traceback.format_exc()
self.log('crawler', 'html_error'+error, resp)
return 'html_error', 9, 'html_error'+error, {}
if bill['resultSet'] == None:
self.log('website', 'website_busy_error', resp)
return 'website_busy_error', 9, 'website_busy_error', {}
bill_amounts = re.findall(u'费用合计:([\d.]+)元', resp.text)
if bill_amounts:
month_bill['bill_amount'] = bill_amounts[0]
bill_package = re.findall(u"套餐费</span><span class='pricebills'>([\d.]+)</span>", resp.text)
if bill_package:
month_bill['bill_package'] = bill_package[0]
return 'success', 0, 'website_busy_error', month_bill
def __monthly_period(self, length=6, strf='%Y%m'):
current_time = datetime.datetime.now()
for month_offset in range(0, length):
yield (current_time - relativedelta(months=month_offset + 1)).strftime(strf)
if __name__ == '__main__':
    # Manual smoke test against the live site.
    # WARNING(review): hard-coded real-looking credentials and ID-card
    # number (PII) below -- these should not be committed to source control.
    c = Crawler()
    USER_ID = "15335686893"
    # USER_ID = "15335686896"
    USER_PASSWORD = "135126"
    USER_FULL_NAME = "薛胜英"
    USER_ID_CARD = "152801198002090347"
    c.self_test(tel=USER_ID,pin_pwd=USER_PASSWORD)
| Svolcano/python_exercise | dianhua/worker/crawler/china_telecom/neimenggu/main.py | Python | mit | 17,659 |
from app import app
from flask import jsonify
from backup.api import api as api_backup
from backup.api import api_restore
# Blueprint registration: mount the backup and restore APIs under their
# respective URL prefixes.
app.register_blueprint(api_backup, url_prefix='/api/backup')
app.register_blueprint(api_restore, url_prefix='/api/restore')
@app.route('/api/help', methods=['GET'])
def help():
    """Print available functions."""
    # NOTE: the function name shadows the builtin help() at module scope.
    # Map each registered URL rule (except static assets) to the docstring
    # of its view function, and return the mapping as JSON.
    func_list = {
        rule.rule: app.view_functions[rule.endpoint].__doc__
        for rule in app.url_map.iter_rules()
        if rule.endpoint != 'static'
    }
    return jsonify(func_list)
if __name__ == '__main__':
app.run('0.0.0.0') | cedricmenec/mysql-backup-service | main.py | Python | mit | 626 |
from typing import Dict, Any, List, Optional
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
import depccg.parsing
from depccg.types import ScoringResult, Token
from depccg.allennlp.predictor.supertagger_predictor import SupertaggerPredictor
from depccg.allennlp.utils import read_params
from depccg.cat import Category
from depccg.printer.my_json import json_of
@Predictor.register('parser-predictor')
class ParserPredictor(SupertaggerPredictor):
    """AllenNLP predictor that runs full CCG parsing on top of the
    supertagger's scores, attaching parse trees to each output dict."""

    def __init__(
        self,
        model: Model,
        dataset_reader: DatasetReader,
        grammar_json_path: str,
        disable_category_dictionary: bool = False,
        disable_seen_rules: bool = False,
        parsing_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Load grammar rules/dictionaries from *grammar_json_path* and keep
        extra keyword arguments to forward to depccg.parsing.run()."""
        super().__init__(model, dataset_reader)
        (
            self.apply_binary_rules,
            self.apply_unary_rules,
            self.category_dict,
            self.root_categories,
        ) = read_params(
            grammar_json_path,
            disable_category_dictionary,
            disable_seen_rules
        )
        self.parsing_kwargs = parsing_kwargs or {}

    def _make_json(self, output_dicts: List[Dict[str, Any]]) -> List[JsonDict]:
        """Augment each supertagger output dict with a 'trees' list of
        (parse-tree JSON + log_prob) entries."""
        categories = None
        score_results = []
        doc = []
        for output_dict in super()._make_json(output_dicts):
            # The category inventory is taken from the first sentence only
            # (assumes every sentence shares the same 'categories' list).
            if categories is None:
                categories = [
                    Category.parse(category)
                    for category in output_dict['categories']
                ]
            tokens = [
                Token.of_word(word)
                for word in output_dict['words'].split(' ')
            ]
            doc.append(tokens)
            # 'heads' / 'head_tags' arrive flattened; restore their shapes
            # from the companion *_shape entries.
            dep_scores = numpy.array(output_dict['heads']) \
                .reshape(output_dict['heads_shape']) \
                .astype(numpy.float32)
            tag_scores = numpy.array(output_dict['head_tags']) \
                .reshape(output_dict['head_tags_shape']) \
                .astype(numpy.float32)
            score_results.append(ScoringResult(tag_scores, dep_scores))
        # Optionally restrict candidate categories per word via the
        # category dictionary loaded from the grammar file.
        if self.category_dict is not None:
            doc, score_results = depccg.parsing.apply_category_filters(
                doc,
                score_results,
                categories,
                self.category_dict,
            )
        results = depccg.parsing.run(
            doc,
            score_results,
            categories,
            self.root_categories,
            self.apply_binary_rules,
            self.apply_unary_rules,
            **self.parsing_kwargs,
        )
        # NOTE: results are zipped against the *original* output_dicts, so
        # trees are attached in input order.
        for output_dict, trees in zip(output_dicts, results):
            output_dict['trees'] = []
            for tree, log_prob in trees:
                tree_dict = json_of(tree)
                tree_dict['log_prob'] = log_prob
                output_dict['trees'].append(tree_dict)
        return output_dicts
| masashi-y/depccg | depccg/allennlp/predictor/parser_predictor.py | Python | mit | 3,044 |
from mediaviewer.models.person import Person
class Writer(Person):
    """Person subclass stored in its own 'writer' table within the
    'mediaviewer' app.

    NOTE(review): assumes Person is a concrete model (multi-table
    inheritance) -- confirm in mediaviewer.models.person.
    """
    class Meta:
        app_label = 'mediaviewer'
        db_table = 'writer'
| kyokley/MediaViewer | mediaviewer/models/writer.py | Python | mit | 146 |
#!/usr/bin/python
"""
html_writer.py - Construct HTML pages
"""
import datetime
import os
import types
import numpy as np
import xml.dom.minidom
class BaseHtmlWriter:
    """Base class for building simple HTML report pages.

    Subclasses supply the output sink by implementing write() and
    relative_to_full_path(); this class layers on helpers for headers, lists,
    tables, collapsible <div> sections, and embedded SVG / matplotlib / pydot
    figures.

    NOTE(review): this is Python 2 era code (it relies on ``types.StringType``
    and friends); keep any additions py2-compatible.
    """
    def __init__(self):
        # Monotonic counter used to mint unique ids for collapsible divs.
        self.div_counter = 0
        pass
    def relative_to_full_path(self, relpath):
        """Resolve *relpath* against the writer's output directory (abstract)."""
        raise Exception("class not implemented")
    def write(self, s):
        """Append the raw string *s* to the output (abstract)."""
        raise Exception("class not implemented")
    def write_header(self):
        """Emit the XHTML doctype, a <head> loading expandCollapse.js, the
        opening <html>/<body> tags, and a generation timestamp."""
        self.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
        self.write('<head>\n')
        self.write('<script type="text/javascript" src="expandCollapse.js"></script>\n')
        self.write('</head>\n')
        self.write('<html>\n<body>\n')
        now = datetime.datetime.now()
        self.write('<div>Written at %s</div>' % now)
    def write_js(self, path):
        """Write the expandCollapse.js helper (used by insert_toggle) into
        *path*, skipping the write if the file already exists."""
        if os.path.exists(path + '/expandCollapse.js'):
            return
        fp = open(path + '/expandCollapse.js', 'w')
        fp.write("""function toggleMe(a){
            var e=document.getElementById(a);
            if(!e)return true;
            if(e.style.display=="none"){
                e.style.display="block"
            } else {
                e.style.display="none"
            }
            return true;
        }
        """)
        fp.close()
    def write_ol(self, l):
        """Write the items of *l* as an ordered (numbered) HTML list."""
        self.write("<ol>\n")
        for mem in l:
            self.write("  <li>%s</li>\n" % str(mem))
        self.write("</ol>\n")
    def write_ul(self, l):
        """Write the items of *l* as an unordered (bulleted) HTML list."""
        self.write("<ul>\n")
        for mem in l:
            self.write("  <li>%s</li>\n" % str(mem))
        self.write("</ul>\n")
    def write_table(self, rowdicts, headers=None, border=1, decimal=None, rowcolors=None):
        """
        Write a list of row dicts as an HTML table.

        In order to print the row number, use the title '#' in headers and
        write_table() will automatically fill that column with the row numbers.

        NOTE(review): this method mutates the caller's row dicts by assigning
        a '#' key to each one.
        """
        def to_string(x, decimal=None):
            # Render a cell value: ints via %d, floats rounded to *decimal*
            # places (NaN becomes 'N/A'), everything else via str().
            if type(x) == types.StringType:
                return x
            if type(x) in (types.IntType, np.int16, np.int32, np.int64):
                return '%d' % x
            if type(x) in (types.FloatType, np.float32, np.float64):
                if np.isnan(x):
                    return 'N/A'
                if decimal is not None:
                    # NOTE(review): builds the format string via eval;
                    # *decimal* must be a trusted integer.
                    return eval("'%%.%df' %% x" % decimal)
                return "%g" % x
            return str(x)
        # When no headers are given, use the sorted union of all row keys.
        if not headers:
            headers = set()
            for rowdict in rowdicts:
                for key in rowdict.keys():
                    headers.add(to_string(key))
            headers = sorted(headers)
        self.write('<table border=%d>\n' % border)
        self.write('<tr><td><b>' + '</b></td><td><b>'.join(headers) + '</b></td></tr>\n')
        for i, rowdict in enumerate(rowdicts):
            rowdict['#'] = '%d' % i
            values = [to_string(rowdict.get(key, ""), decimal) for key in headers]
            # Optional per-row background color (hex string without '#').
            if rowcolors != None:
                self.write('<tr bgcolor=#%s>' % rowcolors[i])
            else:
                self.write('<tr>')
            self.write('<td>' + '</td><td>'.join(values) + '</td></tr>\n')
        self.write('</table>\n')
    def table_start(self, border=1):
        """Open a table for incremental row writing (see table_writerow)."""
        self.write('<table border=%d>\n' % border)
    def table_writerow(self, values):
        """Write one row from an iterable of already-formatted cell strings."""
        self.write('<tr><td>' + '</td><td>'.join(values) + '</td></tr>\n')
    def table_end(self):
        """Close a table opened with table_start()."""
        self.write("</table>\n")
    def insert_toggle(self, div_id=None, start_here=False, label='Show'):
        """Insert a button that shows/hides the div *div_id*.

        A unique id is generated when none is supplied. If *start_here* is
        True, the collapsible div is also opened immediately. Returns the id
        so the caller can open/close the div later.
        """
        if not div_id:
            div_id = "DIV%05d" % self.div_counter
            self.div_counter += 1
        elif type(div_id) != types.StringType:
            raise ValueError("HTML div ID must be a string")
        self.write('<input type="button" class="button" onclick="return toggleMe(\'%s\')" value="%s">\n'
                   % (div_id, label))
        if start_here:
            self.div_start(div_id)
        return div_id
    def div_start(self, div_id):
        """Open a hidden div (shown later via the matching toggle button)."""
        self.write('<div id="%s" style="display:none">' % div_id)
    def div_end(self):
        """Close a div opened with div_start()."""
        self.write('</div>\n')
    def embed_img(self, fig_fname, alternative_string=""):
        """Embed an image by file name.

        NOTE(review): the attribute is written as 'atl' instead of 'alt';
        preserved here because fixing it changes the emitted HTML.
        """
        self.write('<img src="' + fig_fname + '" atl="' + alternative_string + '" />')
    def embed_svg(self, fig_fname, width=320, height=240, name=''):
        """Inline the SVG file *fig_fname*, wrapped in a link to name.svg."""
        self.write('<a href="%s.svg">' % name)
        self.extract_svg_from_file(fig_fname, width=width, height=height)
        self.write('</a>')
        #self.write('<object data="%s" type="image/svg+xml" width="%dpt" height="%dpt" name="%s" frameborder="0" marginwidth="0" marginheight="0"/></object>'
        #           % (fig_fname, width, height, name))
    def embed_matplotlib_figure(self, fig, width=None, height=None, name=None):
        """
        Adds a matplotlib figure into the HTML as an inline SVG

        Arguments:
            fig    - a matplotlib Figure object
            width  - the desired width of the figure in pixels
            height - the desired height of the figure in pixels
            name   - if not None, the SVG will be written to a file with that name will
                     be linked to from the inline figure
        """
        if name:
            svg_filename = self.relative_to_full_path(name + '.svg')
            self.write('<a href="%s.svg">' % name)
        else:
            # Anonymous figure: use a throwaway file, removed below.
            svg_filename = '.svg'
        # Default to the figure's own size (inches * dpi) when not given.
        width = width or (fig.get_figwidth() * fig.get_dpi())
        height = height or (fig.get_figheight() * fig.get_dpi())
        fig.savefig(svg_filename, format='svg')
        self.extract_svg_from_file(svg_filename, width=width, height=height)
        if name:
            self.write('</a>')
        else:
            os.remove(svg_filename)
    def embed_dot_inline(self, Gdot, width=320, height=240, name=None):
        """
        Converts the DOT graph to an SVG DOM and uses the inline SVG option to
        add it directly into the HTML (without creating a separate SVG file).
        """
        if name:
            svg_filename = self.relative_to_full_path(name + '.svg')
            self.write('<a href="%s.svg">' % name)
        else:
            # Anonymous graph: use a throwaway file, removed below.
            svg_filename = '.svg'
        Gdot.write(svg_filename, prog='dot', format='svg')
        self.extract_svg_from_file(svg_filename, width=width, height=height)
        if name:
            self.write('</a>')
        else:
            os.remove(svg_filename)
    def embed_dot(self, Gdot, name, width=320, height=240):
        """
        Renders the DOT graph to <name>.svg next to the HTML file and embeds
        it inline (wrapped in a link to the SVG file) via embed_svg().
        """
        svg_filename = self.relative_to_full_path(name + '.svg')
        Gdot.write(svg_filename, prog='dot', format='svg')
        self.embed_svg(svg_filename, width=width, height=height, name=name)
    def extract_svg_from_xmldom(self, dom, width=320, height=240):
        """Pull the first <svg> element out of *dom*, force its size, and
        write it inline into the HTML output."""
        svg = dom.getElementsByTagName("svg")[0]
        svg.setAttribute('width', '%dpt' % width)
        svg.setAttribute('height', '%dpt' % height)
        self.write(svg.toprettyxml(indent='  ', newl=''))
    def extract_svg_from_file(self, fname, width=320, height=240):
        """Parse the SVG file *fname* and inline its <svg> element."""
        xmldom = xml.dom.minidom.parse(fname)
        self.extract_svg_from_xmldom(xmldom, width, height)
    def branch(self, relative_path, link_text=None):
        """
        Branches the HTML file by creating a new HTML and adding a link to it with the desired text

        NOTE(review): relies on self.filepath, which only the file-backed
        HtmlWriter subclass defines.
        """
        if link_text is None:
            link_text = relative_path
        self.write("<a href=\"" + relative_path + ".html\">" + link_text + "</a>")
        return HtmlWriter(os.path.join(self.filepath, relative_path + ".html"))
    def close(self):
        """Write the closing </body></html> tags."""
        self.write("</body>\n</html>\n")
class NullHtmlWriter(BaseHtmlWriter):
    """A BaseHtmlWriter that silently discards all output (a null sink)."""
    def __init__(self):
        BaseHtmlWriter.__init__(self)
        # No backing file.
        self.filename = None
    def write(self, s):
        # Swallow everything.
        pass
    def relative_to_full_path(self, relpath):
        # No output directory to resolve against; returns None.
        pass
class HtmlWriter(BaseHtmlWriter):
    """HTML writer backed by a real file on disk.

    On construction the target directory is created if missing (best effort),
    the file is opened for writing, and the standard header plus the
    expandCollapse.js helper are emitted immediately.
    """

    def __init__(self, filename, force_path_creation=True, flush_always=True):
        BaseHtmlWriter.__init__(self)
        self.filename = filename
        self.filepath = os.path.dirname(filename)
        self.flush_always = flush_always
        if not os.path.exists(self.filepath):
            if not force_path_creation:
                raise Exception("cannot write to HTML file %s since the directory doesn't exist" % filename)
            try:
                os.mkdir(self.filepath)
            except OSError:
                # Best effort: if the directory still cannot be used, the
                # open() below will surface the real error.
                pass
        self.file = open(self.filename, "w")
        self.write_header()
        self.write_js(self.filepath)

    def relative_to_full_path(self, relpath):
        """Resolve *relpath* against the directory holding the HTML file."""
        return self.filepath + "/" + relpath

    def write(self, s):
        """Append *s* to the file, flushing immediately when flush_always is set."""
        if self.file is None:
            raise Exception("cannot write to this HTML since it is already closed")
        self.file.write(s)
        if self.flush_always:
            self.file.flush()

    def __del__(self):
        # Close (emitting the HTML footer) if the caller forgot to.
        if self.file:
            self.close()

    def close(self):
        """Write the closing tags, then flush and close the underlying file."""
        BaseHtmlWriter.close(self)
        self.file.flush()
        self.file.close()
        self.file = None
def test():
    """Manual smoke test: emit a trivial page under ../res/."""
    writer = HtmlWriter("../res/test.html")
    writer.write("<h1>hello world</h1>\n")


if __name__ == '__main__':
    test()
| eladnoor/optslope | src/html_writer.py | Python | mit | 9,669 |
from conf import paths
import scipy.io
import numpy as np
def load_train():
    """Load the labeled training set from the .mat file at paths.TR_SET.

    Returns:
        (tr_identity, tr_labels, tr_images) arrays, in that order.
    """
    mat = scipy.io.loadmat(file_name=paths.TR_SET)
    return mat['tr_identity'], mat['tr_labels'], mat['tr_images']
def load_unlabeled():
    """Load the unlabeled images from the .mat file at paths.UNLABELED_SET."""
    return scipy.io.loadmat(file_name=paths.UNLABELED_SET)['unlabeled_images']
def load_test():
    """Load the public test images from the .mat file at paths.TEST_SET.

    The hidden test set (paths.HIDDEN_SET, key 'hidden_test_images') is
    deliberately not loaded here.
    """
    mat = scipy.io.loadmat(file_name=paths.TEST_SET)
    return mat['public_test_images']
| hinshun/smile | please/utils.py | Python | mit | 849 |
from django.contrib import admin
from Weather.models import *
from Weather.util import updateForecast
def update_forecast(modeladmin, request, queryset):
    """Admin action: re-fetch each selected forecast via updateForecast()."""
    for forecast in queryset:
        updateForecast(forecast)
# Label displayed for this action in the Django admin "Action" dropdown.
update_forecast.short_description = "Force forecast update from NWS"
class forecastAdmin(admin.ModelAdmin):
    """Forecast admin exposing the forced-update action."""
    actions = [update_forecast]
class WMSRadarOverlayAdmin(admin.ModelAdmin):
    """Default admin for WMSRadarOverlay."""
    pass
admin.site.register(Forecast, forecastAdmin)
admin.site.register(WMSRadarOverlay, WMSRadarOverlayAdmin) | sschultz/FHSU-GSCI-Weather | Weather/admin.py | Python | mit | 524 |
from collections import Counter
def answer(q, inf):
    """Return the entry of *inf* that shares the most words with question *q*.

    Overlap is weighted by how often each word occurs in the question.
    Entries with no overlap are ignored; ties keep the earliest entry;
    None is returned when nothing matches.
    """
    question_words = Counter(q.split(' '))
    best_index = -1
    best_score = 0
    for index, info in enumerate(inf):
        score = sum(question_words.get(word, 0) for word in info.split(' '))
        if score > best_score:
            best_index = index
            best_score = score
    return inf[best_index] if best_index >= 0 else None
| Orange9000/Codewars | Solutions/beta/beta_answer_the_students_questions.py | Python | mit | 293 |
import sys
# Python 2 script: reads a parameter file (argv[1]) and an error file
# (argv[2]) and prints LaTeX table rows of the form "$value\pm error$",
# 20 parameters per logical group, then regroups them by wedge number.
params = open(sys.argv[1], 'r')
errors = open(sys.argv[2], 'r')
# Parse the error file: each useful line is a bracketed, comma-separated
# list of floats; very short lines (< 5 chars) are skipped as noise.
error = []
for line in errors:
    if len(line) < 5: continue
    error.append(map(float, line.replace('[', '').replace(']', '').split(',')))
print error
output = []
count = 0       # index of the current error row
paramCount = 0  # index of the current parameter within a 20-parameter group
# Decimal places used when rounding each of the 20 parameters (and errors).
roundValues = [4, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1]
for line in params:
    out = ""
    ln = line.replace(' ', '').split(",")
    for i in range(len(ln)):
        if ln[i] == '\n': continue
        # Append "$value\pm error$ & " with per-parameter rounding.
        out += "$" + str(round(float(ln[i]), roundValues[paramCount])) + "\pm" + str(round(error[count][paramCount], roundValues[paramCount])) + "$ & "
        paramCount += 1
    # Strip the trailing " & " separator before storing the row.
    output.append(out[0:len(out)-3])
#    print out[0:len(out)-3]
    # A full set of 20 parameters consumes one error row.
    if paramCount == 20:
        count += 1
        paramCount = 0
for i in output:
    print i
# Re-emit the rows grouped by wedge number (9..23): 4 column blocks, each
# walking through output rows with a stride of 5.
wedge = range(9,24)
for i in range(4):
    count = 0
    for j in range(len(output)/5 + 1):
        print str(wedge[count]) + " & " + output[j*5+i] + " \\\\"
        print "\hline"
        count += 1
    print ""
| weissj3/MWTools | Scripts/MakeTableResultsandErrors.py | Python | mit | 1,034 |
from examples import acquire_token_by_username_password
from office365.graph_client import GraphClient
# Example: delete Microsoft Graph groups, batching the delete requests.
client = GraphClient(acquire_token_by_username_password)
# NOTE(review): destructive -- this deletes the returned group(s).
groups = client.groups.get().top(1).execute_query()
for cur_grp in groups:
    # Queue the deletion; nothing is sent until execute_batch() below.
    cur_grp.delete_object()
client.execute_batch()
| vgrem/Office365-REST-Python-Client | examples/directory/delete_groups_batch.py | Python | mit | 289 |
from base import ChoicesEnum
from _version import __version__
| tcwang817/django-choices-enum | django_choices_enum/__init__.py | Python | mit | 62 |
from .waveform_utils import omega, k, VTEMFun, TriangleFun, SineFun
from .current_utils import (
getStraightLineCurrentIntegral,
getSourceTermLineCurrentPolygon,
segmented_line_current_source_term,
)
| simpeg/simpeg | SimPEG/electromagnetics/utils/__init__.py | Python | mit | 212 |