| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
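Each row is a fill-in-the-middle (FIM) sample: the original source file is the concatenation `prefix + middle + suffix`, and the remaining columns are metadata. Below is a minimal sketch of consuming one row, assuming rows are available as plain dicts keyed by the column names above; the example row is abridged from the first record in this dump.

```python
# Illustrative only: reassemble one FIM sample from a row of this dump.
# `row` is a hypothetical dict shaped like the schema; values are abridged.
row = {
    "repo_name": "paultag/ftp-master-doc",
    "path": "doc/conf.py",
    "language": "Python",
    "license": "gpl-3.0",
    "prefix": "# List of patterns, relative to source directory, that match files and\n# dire",
    "middle": "ctories to ignore when looking for source files.\nexclude_patterns = ['_build']\n",
    "suffix": "# The reST default role (used for this markup: `text`) ...",
}

def reassemble(row):
    # The original file text is simply the three spans concatenated in order.
    return row["prefix"] + row["middle"] + row["suffix"]

print("%s:%s (%s)" % (row["repo_name"], row["path"], row["license"]))
print(reassemble(row))
```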
| paultag/ftp-master-doc | doc/conf.py | Python | gpl-3.0 | 7,820 | 0.007545 |
# -*- coding: utf-8 -*-
#
# ftp-master-doc documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 4 21:44:03 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ftp-master-doc'
copyright = u'2014, Paul R. Tagliamonte'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ftp-master-docdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ftp-master-doc.tex', u'ftp-master-doc Documentation',
u'Paul R. Tagliamonte', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ftp-master-doc', u'ftp-master-doc Documentation',
[u'Paul R. Tagliamonte'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ftp-master-doc', u'ftp-master-doc Documentation',
u'Paul R. Tagliamonte', 'ftp-master-doc', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| andersk/zulip | zerver/lib/actions.py | Python | apache-2.0 | 325,551 | 0.001554 |
import datetime
import hashlib
import itertools
import logging
import os
import time
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from operator import itemgetter
from typing import (
IO,
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from django.utils.translation import override as override_language
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat
from analytics.models import RealmCount, StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
get_stream_cache_key,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_delivery_email_cache_key,
)
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
InvitationError,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
ZephyrMessageAlreadySentException,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MessageRenderingResult, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionBackend, MentionData, silent_mention_syntax_for_user
from zerver.lib.message import (
MessageDict,
SendMessageRequest,
access_message,
bulk_access_messages,
get_last_message_id,
normalize_body,
render_markdown,
truncate_topic,
update_first_visible_message_id,
wildcard_mention_allowed,
)
from zerver.lib.notification_data import UserMessageNotificationsData, get_user_group_mentions_data
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
SubInfo,
bulk_get_private_peers,
bulk_get_subscriber_peer_info,
get_active_subscriptions_for_stream_id,
get_bulk_stream_subscriber_info,
    get_stream_subscriptions_for_user,
get_subscribed_stream_ids_for_user,
get_subscriptions_for_send_message,
get_used_colors_for_user_ids,
get_user_ids_for_streams,
    num_subscribers_for_stream_id,
subscriber_ids_with_stream_history_access,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_by_id,
access_stream_for_send_message,
can_access_stream_user_ids,
check_stream_access_based_on_stream_post_policy,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
get_stream_permission_policy_name,
get_web_public_streams_queryset,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.string_validation import check_stream_name, check_stream_topic
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
RESOLVED_TOPIC_PREFIX,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
messages_for_topic,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileDataElementValue, ProfileFieldData, UnspecifiedValue
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_mutes import add_user_mute, get_muting_users, get_user_mutes
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions, is_widget_message
from zerver.models import (
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
Draft,
EmailChangeStatus,
Message,
MultiuseInvite,
MutedUser,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
RealmUserDefault,
Recipient,
ScheduledEmail,
ScheduledMessage,
ScheduledMessageNotificationEmail,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
UserTopic,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_fake_email_domain,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_realm,
get_realm_playgrounds,
get_stream,
get_stream_by_id_in_realm,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
linkifiers_for_realm,
query_for_ids,
r
| jvrsantacruz/XlsxWriter | xlsxwriter/test/worksheet/test_write_worksheet.py | Python | bsd-2-clause | 887 | 0.001127 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteWorksheet(unittest.TestCase):
"""
Test the Worksheet _write_worksheet() method.
"""
def setUp(self):
self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)
def test_write_worksheet(self):
"""Test the _write_worksheet() method"""
self.worksheet._write_worksheet()
exp = """<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| 596acres/django-livinglots-template | project_name/project_name/settings/base.py | Python | gpl-3.0 | 8,268 | 0.002056 |
from collections import OrderedDict
import os
from os.path import abspath, dirname
from django.core.exceptions import ImproperlyConfigured
ENV_VARIABLE_PREFIX = 'LL'
def get_env_variable(var_name):
"""Get the environment variable or return exception"""
if not ENV_VARIABLE_PREFIX:
raise ImproperlyConfigured('Set ENV_VARIABLE_PREFIX')
try:
return os.environ[ENV_VARIABLE_PREFIX + '_' + var_name]
except KeyError:
error_msg = "Set the %s env variable" % var_name
raise ImproperlyConfigured(error_msg)
DATABASES = {
'default': {
# PostGIS < 2.0:
# > createdb -T template_postgis livinglots
# > psql
# # create user livinglots with password 'password';
# # grant all privileges on database livinglots to livinglots;
#
# PostGIS >= 2.0:
# > createdb livinglots
# > psql livinglots
# # create extension postgis;
# # create extension postgis_topology;
# # create user livinglots with password 'password';
# # grant all privileges on database livinglots to livinglots;
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': get_env_variable('DB_NAME'),
'USER': get_env_variable('DB_USER'),
'PASSWORD': get_env_variable('DB_PASSWORD'),
'HOST': get_env_variable('DB_HOST'),
'PORT': get_env_variable('DB_PORT'),
}
}
gettext = lambda s: s
LANGUAGES = (
('en', gettext('English')),
('es', gettext('Spanish')),
)
LANGUAGE_CODE = 'en'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'America/New_York'
PROJECT_ROOT = os.path.join(abspath(dirname(__file__)), '..', '..')
DATA_ROOT = os.path.join(PROJECT_ROOT, 'data')
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'collected_static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = get_env_variable('SECRET_KEY')
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'honeypot.middleware.HoneypotMiddleware',
'reversion.middleware.RevisionMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'feincms.context_processors.add_page_if_missing',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
#
# django contrib
#
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.gis',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.webdesign',
#
# third-party
#
'actstream',
'admin_enhancer',
'contact_form',
'django_monitor',
'djangojs',
'elephantblog',
'feincms',
'feincms.module.medialibrary',
'feincms.module.page',
'flatblocks',
'honeypot',
'imagekit',
'inplace',
'inplace.boundaries',
'inplace_activity_stream',
'jsonfield',
'mptt',
'reversion',
'reversion_compare',
'widget_tweaks',
#
# first-party, project-generic
#
'pagepermissions',
#
# Living Lots
#
'livinglots_lots',
'livinglots_notify',
'livinglots_organize',
'livinglots_owners',
'livinglots_pathways',
'livinglots_steward',
'livinglots_usercontent.files',
'livinglots_usercontent.notes',
'livinglots_usercontent.photos',
#
# first-party, project-specific
#
'activities',
'blog',
'cms',
'contact',
'groundtruth',
'lots',
'organize',
'owners',
'pathways',
'steward',
'usercontent',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
},
}
RECAPTCHA_PRIVATE_KEY = get_env_variable('RECAPTCHA_PRIVATE_KEY')
RECAPTCHA_PUBLIC_KEY = get_env_variable('RECAPTCHA_PUBLIC_KEY')
ORGANIZE_PARTICIPANT_SALT = get_env_variable('ORGANIZE_PARTICIPANT_SALT')
ACTSTREAM_SETTINGS = {
'MANAGER': 'inplace_activity_stream.managers.PlaceActionManager',
}
ACTIVITY_STREAM_DEFAULT_ACTOR_PK = get_env_variable('ACTSTREAM_DEFAULT_ACTOR_PK')
FACILITATORS = {
'global': [],
}
EMAIL_SUBJECT_PREFIX = '[Living Lots] '
MAILREADER_REPLY_PREFIX = 'Reply with text above this line to post a public note.'
MAILREADER_IGNORE_FROM = []
MAILREADER_HOST = get_env_variable('MAILREADER_HOST')
MAILREADER_HOST_USER = get_env_variable('MAILREADER_HOST_USER')
MAILREADER_HOST_PASSWORD = get_env_variable('MAILREADER_HOST_PASSWORD')
FEINCMS_RICHTEXT_INIT_TEMPLATE = 'admin/content/richtext/init_richtext.html'
FEINCMS_RICHTEXT_INIT_CONTEXT = {
'TINYMCE_JS_URL': STATIC_URL + 'bower_components/tinymce/js/tinymce/tinymce.js',
}
def elephantblog_entry_url_app(self):
from feincms.content.application.models import app_reverse
return app_reverse('elephantblog_entry_detail', 'elephantblog.urls',
kwargs={
'year': self.published_on.strftime('%Y'),
'month': self.published_on.strftime('%m'),
'day': self.published_on.strftime('%d'),
'slug': self.slug,
})
def elephantblog_categorytranslation_url_app(self):
from feincms.content.application.models import app_reverse
return app_reverse('elephantblog_category_detail', 'elephantblog.urls',
kwargs={ 'slug': self.slug, })
ABSOLUTE_URL_OVERRIDES = {
'elephantblog.entry': elephantblog_entry_url_app,
'elephantblog.categorytranslation': elephantblog_categorytranslation_url_app,
}
SOUTH_MIGRATION_MODULES = {
'page': 'cms.migrate.page',
    'medialibrary': 'cms.migrate.medialibrary',
}
HONEYPOT_FIELD_NAME = 'homepage_url'
HONEYPOT_VALUE = 'http://example.com/'
ADMIN_TOOLS_INDEX_DASHBOARD = '{{ project_name }}.admindashboard.LivingLotsDashboard'
LIVING_LOTS = {
'MODELS': {
'lot': 'lots.Lot',
'lotgroup': 'lots.LotGroup',
'organizer': 'organize.Organizer',
'owner': 'owners.Owner',
'pathway': 'pathways.Pathway',
},
}
# TODO replace with project reasons and email addresses
CONTACT_FORM_REASONS = OrderedDict([
('The lot I want permission to use is not here.', ['info@example.com',]),
('I want to share my land access story.', ['info@example.com',]),
('I want to loan or lease my land for a temporary project.', ['info@example.com',]),
('I want to invite admins to an event.', ['info@example.com',]),
('I want to reach 596 Acres, the team that made this site.
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_xdrlib.py | Python | epl-1.0 | 30 | 0 |
import xdrlib
xdrlib._test()
| rigetticomputing/grove | grove/pyqaoa/maxcut_qaoa.py | Python | apache-2.0 | 4,142 | 0.002414 |
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Finding a maximum cut by QAOA.
"""
import networkx as nx
import numpy as np
from pyquil.api import get_qc
from pyquil.paulis import PauliTerm, PauliSum
from scipy.optimize import minimize
from grove.pyqaoa.qaoa import QAOA
def maxcut_qaoa(graph, steps=1, rand_seed=None, connection=None, samples=None,
initial_beta=None, initial_gamma=None, minimizer_kwargs=None,
vqe_option=None):
"""
Max cut set up method
:param graph: Graph definition. Either networkx or list of tuples
:param steps: (Optional. Default=1) Trotterization order for the QAOA algorithm.
:param rand_seed: (Optional. Default=None) random seed when beta and gamma angles
are not provided.
:param connection: (Optional) connection to the QVM. Default is None.
:param samples: (Optional. Default=None) VQE option. Number of samples
(circuit preparation and measurement) to use in operator averaging.
:param initial_beta: (Optional. Default=None) Initial guess for beta parameters.
:param initial_gamma: (Optional. Default=None) Initial guess for gamma parameters.
    :param minimizer_kwargs: (Optional. Default=None). Minimizer optional arguments. If None set to
        ``{'method': 'Nelder-Mead', 'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2, 'disp': False}}``
:param vqe_option: (Optional. Default=None). VQE optional arguments. If None set to
``vqe_option = {'disp': print_fun, 'return_all': True, 'samples': samples}``
"""
if not isinstance(graph, nx.Graph) and isinstance(graph, list):
maxcut_graph = nx.Graph()
for edge in graph:
            maxcut_graph.add_edge(*edge)
        graph = maxcut_graph.copy()
cost_operators = []
driver_operators = []
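    # Note: for each edge (i, j) the cost term 0.5*Z_i*Z_j - 0.5 evaluates to
    # 0 when the two qubits agree and -1 when they disagree, so minimizing the
    # summed cost maximizes the number of cut edges.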
for i, j in graph.edges():
cost_operators.append(PauliTerm("Z", i, 0.5)*PauliTerm("Z", j) + PauliTerm("I", 0, -0.5))
for i in graph.nodes():
driver_operators.append(PauliSum([PauliTerm("X", i, -1.0)]))
if connection is None:
connection = get_qc(f"{len(graph.nodes)}q-qvm")
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2,
'disp': False}}
if vqe_option is None:
vqe_option = {'disp': print, 'return_all': True,
'samples': samples}
qaoa_inst = QAOA(connection, list(graph.nodes()), steps=steps, cost_ham=cost_operators,
ref_ham=driver_operators, store_basis=True,
rand_seed=rand_seed,
init_betas=initial_beta,
init_gammas=initial_gamma,
minimizer=minimize,
minimizer_kwargs=minimizer_kwargs,
vqe_options=vqe_option)
return qaoa_inst
if __name__ == "__main__":
# Sample Run:
# Cutting 0 - 1 - 2 graph!
inst = maxcut_qaoa([(0, 1), (1, 2)],
steps=2, rand_seed=42, samples=None)
betas, gammas = inst.get_angles()
probs = inst.probabilities(np.hstack((betas, gammas)))
for state, prob in zip(inst.states, probs):
print(state, prob)
print("Most frequent bitstring from sampling")
most_freq_string, sampling_results = inst.get_string(betas, gammas)
print(most_freq_string)
| clarammdantas/Online-Jugde-Problems | online_judge_solutions/diferent_digits.py | Python | mit | 250 | 0.068 |
#diferent_digits
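# Counts, for each interval [n1, n2], how many numbers have no repeated digit:
# a number i qualifies exactly when len(str(i)) == len(set(str(i))), so the
# answer printed is the interval size minus the count of numbers with repeats.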
while True:
try:
        n1, n2 = map(int, raw_input().split())
n_casas = n2 - n1 + 1
n = 0
for i in range(n1, n2 + 1):
c = str(i)
c_nrep = set(c)
if len(c) != len(c_nrep):
n += 1
print n_casas - n
except: break
| evanthebouncy/nnhmm | mnist_haar/check_data.py | Python | mit | 735 | 0.006803 |
from data import *
from draw import *
img, hiden_x = get_img_class()
print img.shape
print img
d_idx = np.random.randint(0, 50)
x_x, obs_x, obs_y, obs_tfs, new_ob_x, new_ob_y, new_ob_tf, imgs = gen_data()
print show_dim(x_x)
print show_dim(obs_x)
print show_dim(obs_y)
print show_dim(obs_tfs)
print show_dim(new_ob_x)
print show_dim(new_ob_y)
print show_dim(new_ob_tf)
obss = zip([np.argmax(obx[d_idx]) for obx in obs_x],
[np.argmax(oby[d_idx]) for oby in obs_y],
[obtf[d_idx] for obtf in obs_tfs])
obss = [((x[0],x[1]), x[2]) for x in obss]
print "hidden number value ", np.argmax(x_x[d_idx])
draw_obs(obss, "test_obs.png")
img = imgs[d_idx]
draw(np.reshape(img, [L,L,1]), "test_orig.png")
print img
| akrzos/cfme_tests | cfme/tests/infrastructure/test_provisioning.py | Python | gpl-2.0 | 7,759 | 0.003093 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.common.provider import cleanup_vm
from cfme.provisioning import do_vm_provisioning
from cfme.services import requests
from cfme.web_ui import fill
from utils import normalize_text, testgen
from utils.blockers import BZ
from utils.log import logger
from utils.mgmt_system import RHEVMSystem
from utils.wait import wait_for
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.meta(blockers=[
BZ(
1265466,
unblock=lambda provider: not isinstance(provider.mgmt, RHEVMSystem))
]),
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.infra_providers(metafunc,
required_fields=[
['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']
])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
@pytest.fixture(scope="function")
def vm_name():
vm_name = 'test_tmpl_prov_{}'.format(fauxfactory.gen_alphanumeric())
return vm_name
@pytest.mark.tier(1)
def test_provision_from_template(rbac_role, configure_ldap_auth_mode, setup_provider, provider,
vm_name, smtp_test, request, provisioning):
""" Tests provisioning from a template
Metadata:
test_flag: provision
suite: infra_provisioning
rbac:
roles:
default:
evmgroup-super_administrator:
evmgroup-administrator:
evmgroup-operator: NoSuchElementException
evmgroup-auditor: NoSuchElementException
"""
# generate_tests makes sure these have values
template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
provisioning_data = {
'vm_name': vm_name,
'host_name': {'name': [host]},
'datastore_name': {'name': [datastore]}
}
# Same thing, different names. :\
if provider.type == 'rhevm':
provisioning_data['provision_type'] = 'Native Clone'
elif provider.type == 'virtualcenter':
provisioning_data['provision_type'] = 'VMware'
try:
provisioning_data['vlan'] = provisioning['vlan']
except KeyError:
# provisioning['vlan'] is required for rhevm provisioning
if provider.type == 'rhevm':
raise pytest.fail('rhevm requires a vlan value in provisioning info')
do_vm_provisioning(template, provider, vm_name, provisioning_data, request, smtp_test,
num_sec=900)
@pytest.mark.parametrize("edit", [True, False], ids=["edit", "approve"])
def test_provision_approval(
setup_provider, provider, vm_name, smtp_test, request, edit, provisioning):
""" Tests provisioning approval. Tests couple of things.
* Approve manually
* Approve by editing the request to conform
Prerequisities:
* A provider that can provision.
* Automate role enabled
* User with e-mail set so you can receive and view them
Steps:
* Create a provisioning request that does not get automatically approved (eg. ``num_vms``
bigger than 1)
* Wait for an e-mail to come, informing you that the auto-approval was unsuccessful.
* Depending on whether you want to do manual approval or edit approval, do:
* MANUAL: manually approve the request in UI
* EDIT: Edit the request in UI so it conforms the rules for auto-approval.
* Wait for an e-mail with approval
* Wait until the request finishes
* Wait until an email, informing about finished provisioning, comes.
Metadata:
test_flag: provision
suite: infra_provisioning
"""
# generate_tests makes sure these have values
template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
# It will provision two of them
vm_names = [vm_name + "001", vm_name + "002"]
request.addfinalizer(
lambda: [cleanup_vm(vmname, provider) for vmname in vm_names])
provisioning_data = {
'vm_name': vm_name,
'host_name': {'name': [host]},
'datastore_name': {'name': [datastore]},
'num_vms': "2",
}
# Same thing, different names. :\
if provider.type == 'rhevm':
provisioning_data['provision_type'] = 'Native Clone'
elif provider.type == 'virtualcenter':
provisioning_data['provision_type'] = 'VMware'
try:
provisioning_data['vlan'] = provisioning['vlan']
except KeyError:
# provisioning['vlan'] is required for rhevm provisioning
if provider.type == 'rhevm':
raise pytest.fail('rhevm requires a vlan value in provisioning info')
do_vm_provisioning(template, provider, vm_name, provisioning_data, request, smtp_test,
wait=False)
wait_for(
lambda:
len(filter(
lambda mail:
"your request for a new vms was not autoapproved" in normalize_text(mail["subject"]),
smtp_test.get_emails())) > 0,
num_sec=90, delay=5)
wait_for(
lambda:
len(filter(
lambda mail:
"virtual machine request was not approved" in normalize_text(mail["subject"]),
smtp_test.get_emails())) > 0,
        num_sec=90, delay=5)
cells = {'Description': 'Provision from [{}] to [{}###]'.format(template, vm_name)}
    wait_for(lambda: requests.go_to_request(cells), num_sec=80, delay=5)
if edit:
# Automatic approval after editing the request to conform
with requests.edit_request(cells) as form:
fill(form.num_vms, "1")
new_vm_name = vm_name + "_xx"
fill(form.vm_name, new_vm_name)
vm_names = [new_vm_name] # Will be just one now
cells = {'Description': 'Provision from [{}] to [{}]'.format(template, new_vm_name)}
check = "vm provisioned successfully"
request.addfinalizer(
lambda: cleanup_vm(new_vm_name, provider))
else:
# Manual approval
requests.approve_request(cells, "Approved")
vm_names = [vm_name + "001", vm_name + "002"] # There will be two VMs
request.addfinalizer(
lambda: [cleanup_vm(vmname, provider) for vmname in vm_names])
check = "request complete"
wait_for(
lambda:
len(filter(
lambda mail:
"your virtual machine configuration was approved" in normalize_text(mail["subject"]),
smtp_test.get_emails())) > 0,
num_sec=120, delay=5)
# Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key)
wait_for(
lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
handle_exception=True, num_sec=600)
row, __ = wait_for(requests.wait_for_request, [cells],
fail_func=requests.reload, num_sec=1500, delay=20)
assert normalize_text(row.last_message.text) == check
# Wait for e-mails to appear
def verify():
return (
len(filter(
lambda mail:
"your virtual machine request has completed vm {}".format(normalize_text(vm_name))
in normalize_text(mail["subject"]),
smtp_test.get_emails())) == len(vm_names)
)
wait_for(verify, message="email receive check", delay=5)
| schnittstabil/findd | findd/cli/views.py | Python | mit | 1,481 | 0 |
import logging
from shlex import quote
from findd.cli.widgets import hr
from findd.cli.widgets import ProgressBarManager
__LOG__ = logging.getLogger(__name__)
class BaseView(object):
def __init__(self, show_progressbars):
self.pbm = ProgressBarManager() if show_progressbars else None
def __enter__(self):
if self.pbm is not None:
self.pbm.__enter__()
return self
def __exit__(self, *args):
if self.pbm is not None:
self.pbm.__exit__(*args)
class InitView(BaseView):
def __init__(self):
BaseView.__init__(self, __LOG__.isEnabledFor(logging.INFO))
class UpdateView(BaseView):
def __init__(self):
BaseView.__init__(self, __LOG__.isEnabledFor(logging.INFO))
class ListDuplicatesView(BaseView):
def __init__(self):
BaseView.__init__(self, False)
def print_duplicates(self, duplicates):
        print(' '.join([quote(afile.relpath) for afile in duplicates]))
class ProcessDuplicatesView(BaseView):
def __init__(self):
BaseView.__init__(self, __LOG__.isEnabledFor(logging.INFO))
def print_subprocess_call(self, args):
__LOG__.debug(' '.join(args))
    def print_duplicates(self, duplicates):
if __LOG__.isEnabledFor(logging.INFO):
print(hr(' processed duplicates '))
paths = [quote(afile.relpath) for afile in duplicates]
for path in paths:
print(path)
print(hr())
| alexsavio/aizkolari | matrans.py | Python | bsd-3-clause | 10,313 | 0.029089 |
#!/usr/bin/python
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#README:
#Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps.
#You can make a list of measures and they will be applied in order.
#A list of the implemented measures are listed below.
#Geodesic anisotropy equation was extracted from
#P. G. Batchelor et al. - A Rigorous Framework for Diffusion Tensor Calculus - Magnetic Resonance in Medicine 53:221-225 (2005)
# What is tensor denoising?
#Log-Euclidean tensor denoising was used to eliminate singular, negative definite, or rank-deficient tensors
#-------------------------------------------------------------------------------
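#For reference, the geodesic anisotropy computed below for a tensor S is
#  GA(S) = sqrt( trace( (logm(S) - trace(logm(S))/N * eye(N))^2 ) )
#which is exactly what mygeodan() implements.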
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import argparse, os, sys
from time import clock
import nibabel as nib
import numpy as np
from scipy.linalg import logm
from scipy.linalg.matfuncs import sqrtm
from numpy.linalg import det
from numpy.linalg import eigvals
from numpy.linalg import eigvalsh
#-------------------------------------------------------------------------------
#definining measure functions
def mylogm (v):
return np.reshape(logm(v.reshape(N,N)), [1,N*N])
#-------------------------------------------------------------------------------
def mydet (v):
return det(v.reshape(N,N))
#-------------------------------------------------------------------------------
def mytrace (v):
return np.trace(v.reshape(N,N))
#-------------------------------------------------------------------------------
def myeigvals (v):
return eigvals(v.reshape(N,N)).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvals (v):
return max (myeigvals(v))
#-------------------------------------------------------------------------------
def myeigvalsh (v):
return eigvalsh(v.reshape(N,N)).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvalsh (v):
return max (myeigvalsh(v))
#-------------------------------------------------------------------------------
def mydeftensor (v):
j = v.reshape([N,N])
s = sqrtm(j.transpose()*j)
    return s.reshape([1,N*N])
#-------------------------------------------------------------------------------
def mygeodan (v):
s = logm(v.reshape(N,N))
return np.sqrt(np.trace(np.square(s - np.trace(s)/N * np.eye(N))))
#-------------------------------------------------------------------------------
def calculate_measures (funcs, data, odims):
for i in range(len(funcs)):
measure = funcs[i]
odim = odims[i]
data = measure(data)
return data
#-------------------------------------------------------------------------------
def set_parser():
parser = argparse.ArgumentParser(description='Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps. \n You can make a list of measures and they will be applied in order. \n A list of the implemented measures are listed below.', prefix_chars='-')
parser.add_argument('-i', '--in', dest='infile', required=True,
help='Jacobian matrix volume (4DVolume with 9 volumes)')
parser.add_argument('-m', '--mask', dest='maskfile', required=False,
help='Mask file')
parser.add_argument('-o', '--out', dest='outfile', required=True,
help='Output file name')
parser.add_argument('-N', '--dims', dest='dims', required=False, default=3, type=int,
help='Order of the matrices in the volume')
parser.add_argument('--matlog', dest='funcs', action='append_const', const='matlog',
help='Matrix logarithm')
parser.add_argument('--deftensor', dest='funcs', action='append_const', const='deftensor',
help='Deformation tensor S=sqrtm(J`*J)')
parser.add_argument('--det', dest='funcs', action='append_const', const='det',
help='Determinant')
parser.add_argument('--trace', dest='funcs', action='append_const', const='trace',
help='Trace')
parser.add_argument('--eigvals', dest='funcs', action='append_const', const='eigvals',
help='Eigenvalues of a general matrix')
parser.add_argument('--maxeigvals', dest='funcs', action='append_const', const='maxeigvals',
help='Maximum eigenvalue of a general matrix')
parser.add_argument('--eigvalsh', dest='funcs', action='append_const', const='eigvalsh',
help='Eigenvalues of a Hermitian or real symmetric matrix')
parser.add_argument('--maxeigvalsh', dest='funcs', action='append_const', const='maxeigvalsh',
help='Maximum eigenvalue of a Hermitian or real symmetric matrix')
parser.add_argument('--geodan', dest='funcs', action='append_const', const='geodan',
help='Geodesic anisotropy: sqrt(trace(matlog(S) - (trace(matlog(S))/N)*eye(N))^2, where N==3 ')
return parser
#Geodesic anisotropy from:
#COMPARISON OF FRACTIONAL AND GEODESIC ANISOTROPY IN DIFFUSION TENSOR IMAGES OF 90 MONOZYGOTIC AND DIZYGOTIC TWINS
#Agatha D. Lee1, Natasha Lepore1, Marina Barysheva1, Yi-Yu Chou1, Caroline Brun1, Sarah K. Madsen1, Katie L. McMahon2, 1 Greig I. de Zubicaray2, Matthew Meredith2, Margaret J. Wright3, Arthur W. Toga1, Paul M. Thompson
#http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.3274
#-------------------------------------------------------------------------------
## START MATRIX TRANSFORMATIONS
#-------------------------------------------------------------------------------
def main():
#parsing arguments
parser = set_parser()
#parsing arguments
try:
args = parser.parse_args ()
except argparse.ArgumentError, exc:
print (exc.message + '\n' + exc.argument)
        parser.error(str(exc))
return -1
ifile = args.infile.strip()
ofile = args.outfile.strip()
maskf = args.maskfile.strip()
funcs = args.funcs
#setting the global variable that indicates the order of the matrices
global N
N = args.dims
#loading file and preprocessing
iinfo = nib.load(ifile)
affine = iinfo.get_affine()
minfo = nib.load(maskf)
if len(iinfo.shape) != 4:
err = 'File ' + ifile + ' should be a 4D volume'
print(err)
return -1
#global variable N (for the nested functions)
N = np.sqrt(iinfo.shape[3])
if not N % 1 == 0:
err = 'File ' + ifile + ' should have N volumes along its 4th dimension, where N is an exponent of 2.'
print(err)
return -1
try:
#deciding what function to use
# and indicating size of 4th dimension of output
myfuncs = {}
odims = np.empty(len(funcs), dtype=int)
for i in range(len(funcs)):
if funcs [i] == 'matlog':
myfuncs[i] = mylogm
odims [i] = N
elif funcs[i] == 'det':
myfuncs[i] = mydet
odims [i] = 1
elif funcs[i] == 'trace':
myfuncs[i] = mytrace
odims [i] = 1
elif funcs[i] == 'deftensor':
myfuncs[i] = mydeftensor
odims [i] = N
elif funcs[i] == 'eigvalsh':
myfuncs[i] = myeigvalsh
odims [i] = 3
elif funcs[i] == 'eigvals':
myfuncs[i] = myeigvals
odims [i] = 3
            elif funcs[i] == 'maxeigvalsh':
                myfuncs[i] = mymaxeigvalsh
                odims [i] = 1
            elif funcs[i] == 'maxeigvals':
                myfuncs[i] = mymaxeigvals
                odims [i] = 1
elif funcs[i] == 'geodan':
myfuncs[i] = mygeodan
odims [i] = 1
#reading input data
img = iinfo.get_data()
mask = minfo.get_data
| KryoEM/relion2 | python/star/replace_ctf.py | Python | gpl-2.0 | 2,295 | 0.031808 |
# Replaces the ctf values in input star file with the values in a reference star file
import argparse
import os
from star import *
def parse_args():
parser = argparse.ArgumentParser(description="Replaces the ctf values in input star file with the values in a reference star file.")
parser.add_argument('--input', metavar='f1', type=str, nargs=1, required=True, help="particle file whose ctf values will be changed")
parser.add_argument('--reference', metavar='f2', type=str, nargs=1, required=True, help="particle file whose ctf values will be used as a reference")
parser.add_argument('--output', metavar='o', type=str, nargs=1, help="output file name")
return parser.parse_args()
def main(reference_path,input_path):
# parameters that are relevant to the CTF estimation
ctf_params = ['DefocusU','DefocusV','DefocusAngle','CtfFigureOfMerit','SphericalAberration','AmplitudeContrast']
# dictionary of micrograph name to ctf values
# key = micrograph name
    # value = the reference CTF values for the parameters being replaced
mic_to_ctf = {}
output = ''
print "Reading in reference CTF estimates"
ref_star = starFromPath(reference_path)
params_to_replace = [ params for params in ctf_params if params in ref_star.lookup ]
for line in ref_star.body:
mic_root = rootname(ref_star.getMic(line))
if mic_root in mic_to_ctf:
continue
else:
mic_to_ctf[mic_root] = ref_star.valuesOf(params_to_replace, line)
print "Reading input file"
input_star = starFromPath(input_path)
output += input_star.textHeader()
fields_to_replace = input_star.numsOf( params_to_replace )
for line in input_star.body:
values = line.split()
mic_root = rootname(input_star.valueOf('MicrographName',line))
        for index, field in enumerate(fields_to_replace):
values[field] = mic_to_ctf[mic_root][index] or values[field]
output += makeTabbedLine(values)
return output
if __name__ == '__main__':
args = parse_args()
input_path = args.input[0]
reference_path = args.reference[0]
    if args.output:
output_path = args.output[0]
else:
root, ext = os.path.splitext(input_path)
output_path = root + '_replace_ctf' + ext
    output = main(reference_path, input_path)
with open(output_path, 'w') as output_file:
output_file.write(output)
print "Done!"
| andrewhead/Package-Qualifiers | migrate/0002_add_column_task_mode.py | Python | mit | 386 | 0.002591 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from playhouse.migrate import migrate
from peewee import TextField
logger = logging.getLogger('data')
def forward(migrator):
    migrate(
migrator.add_column('task', 'mode', TextField(default='uninitialized')),
migrator.add_index('task', ('mode',), False),
)
| kxepal/phoxpy | phoxpy/tests/modules/__init__.py | Python | bsd-3-clause | 216 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
| datacommonsorg/data | scripts/eurostat/regional_statistics_by_nuts/population_density/PopulationDensity_preprocess_gen_tmcf.py | Python | apache-2.0 | 3,311 | 0.000604 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import io
import csv
_DATA_URL = "https://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?file=data/demo_r_d3dens.tsv.gz"
_CLEANED_CSV = "./PopulationDensity_Eurostat_NUTS3.csv"
_TMCF = "./PopulationDensity_Eurosta
|
t_NUTS3.tmcf"
_OUTPUT_COLUMNS = [
'Date',
'GeoId',
'Count_Person_PerArea',
]
def translate_wide_to_long(data_url):
df = pd.read_csv(data_url, delimiter='\t')
    assert not df.empty  # the download must return data
header = list(df.columns.values)
years = header[1:]
# Pandas.melt() unpivots a DataFrame from wide format to long format.
df = pd.melt(df,
id_vars=header[0],
value_vars=years,
var_name='time',
value_name='value')
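    # Illustration (hypothetical values): a wide row such as
    #   unit,geo\time = 'PER_KM2,AT130', '2018': '442.1', '2019': '443.0'
    # melts into one long row per year, e.g. ('PER_KM2,AT130', '2018', '442.1').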
# Separate geo and unit columns.
new = df[header[0]].str.split(",", n=1, expand=True)
df['geo'] = new[1]
df['unit'] = new[0]
df.drop(columns=[header[0]], inplace=True)
# Remove empty rows, clean values to have all digits.
df = df[df.value.str.contains('[0-9]')]
possible_flags = [' ', ':', 'b', 'e']
for flag in possible_flags:
df['value'] = df['value'].str.replace(flag, '')
df['value'] = pd.to_numeric(df['value'])
return (df)
def preprocess(df, cleaned_csv):
with open(cleaned_csv, 'w', newline='') as f_out:
writer = csv.DictWriter(f_out,
fieldnames=_OUTPUT_COLUMNS,
lineterminator='\n')
writer.writeheader()
for _, row in df.iterrows():
writer.writerow({
# 'Date': '%s-%s-%s' % (row_dict['TIME'][:4], '01', '01'),
'Date': '%s' % (row['time'][:4]),
'GeoId': 'dcid:nuts/%s' % row['geo'],
'Count_Person_PerArea': float(row['value']),
})
def get_template_mcf():
# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:EurostatNUTS3_DensityTracking->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
observationAbout: C:EurostatNUTS3_DensityTracking->GeoId
observationDate: C:EurostatNUTS3_DensityTracking->Date
value: C:EurostatNUTS3_DensityTracking->{stat_var}
measurementMethod: "EurostatRegionalStatistics"
"""
stat_vars = _OUTPUT_COLUMNS[2:]
with open(_TMCF, 'w', newline='') as f_out:
for i in range(len(stat_vars)):
f_out.write(
TEMPLATE_MCF_TEMPLATE.format_map({
'index': i,
'stat_var': _OUTPUT_COLUMNS[2:][i]
}))
if __name__ == "__main__":
preprocess(translate_wide_to_long(_DATA_URL), _CLEANED_CSV)
get_template_mcf()
| rohe/IdPproxy | src/idpproxy/social/linkedin/__init__.py | Python | bsd-2-clause | 1,002 | 0.002994 |
import json
from idpproxy.social.oauth import OAuth
import oauth2 as oauth
#from xml.etree import ElementTree as ET
import logging
logger = logging.getLogger(__name__)
__author__ = 'rohe0002'
class LinkedIn(OAuth):
def __init__(self, client_id, client_secret, **kwargs):
OAuth.__init__(self, client_id, client_secret, **kwargs)
    def get_profile(self, info_set):
token = oauth.Token(key=info_set["oauth_token"][0],
secret=info_set["oauth_token_secret"][0])
client = oauth.Client(self.consumer, token)
resp, content = client.request(self.extra["userinfo_endpoint"], "GET")
# # content in XML :-(
# logger.debug("UserInfo XML: %s" % content)
# res = {}
# root = ET.fromstring(content)
        # for child in root:
# res[child.tag] = child.text
res = json.loads(content)
logger.debug("userinfo: %s" % res)
res["user_id"] = info_set["oauth_token"]
return resp, res
| mojwang/selenium | py/test/selenium/webdriver/common/results_page.py | Python | apache-2.0 | 1,408 | 0 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class ResultsPage(object):
"""This class models a google search result page."""
def __init__(self, driver):
self._driver = driver
def is_loaded(self):
return "/search" in self._driver.get_current_url()
    def load(self):
raise Exception("This page shouldn't be loaded directly")
def link_contains_match_for(self, term):
result_section = self._driver.find_element_by_id("res")
elements = result_section.find_elements_by_xpath(".//*[@class='l']")
for e in elements:
            if term in e.get_text():
return True
return False
| plotly/plotly.py | packages/python/plotly/plotly/validators/contourcarpet/colorbar/_showticklabels.py | Python | mit | 476 | 0 |
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="showticklabels",
parent_name="contourcarpet.colorbar",
**kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
| ludoo/wpkit | attic/wpfrontman/wp_frontman/management/commands/wpf_maintenance.py | Python | bsd-3-clause | 9,051 | 0.007071 |
import os
import sys
import time
import subprocess
import select
from optparse import make_option
from django.conf import settings
from django.db import connection
from django.core.cache import cache
from django.core.management.base import BaseCommand, CommandError
from wp_frontman.blog import Blog
from wp_frontman.cache import cache_timestamps
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
"--delete-revisions", action="store_true", dest="revisions", default=False,
help="delete revisions older than 48 hours"
),
make_option(
"--purge-cache", action="store_true", dest="cache", default=False,
help="purge stale files from the cache"
),
make_option(
"--publish-future-posts", action="store_true", dest="future", default=False,
help="publish posts that have been scheduled for past dates"
),
make_option(
"--wp-cron", action="store_true", dest="cron", default=False,
help="run wp cron"
),
)
usage = lambda s, sc: "Usage: ./manage.py %s [options] [blog_id_1 ... blog_id_n]" % sc
help = "Performs WP related management tasks."
requires_model_validation = False
def _message(self, m, double=None, line_char='-', verbosity=None):
if verbosity == '0':
return
if double in ('pre', 'both'):
print
print m
if line_char:
print len(m) * line_char
if double in ('post', 'both'):
print
def handle(self, *args, **opts):
verbosity = opts['verbosity']
if args:
try:
args = [int(a) for a in args]
except (TypeError, ValueError):
raise CommandError("Invalid blog id in arguments")
blogs = [b for b in Blog.get_blogs() if b.blog_id in args]
else:
blogs = Blog.get_blogs()
cursor = connection.cursor()
if opts['cron']:
self._message("Running wp-cron.php", 'pre', '=', verbosity=verbosity)
wp_root = Blog.site.wp_root
self._message("WP root '%s'" % wp_root, line_char='', verbosity=verbosity)
if not wp_root:
raise CommandError("No wp_root set for site")
if not os.path.isdir(wp_root):
raise CommandError("WP root '%s' is not a directory" % wp_root)
if not os.access(wp_root, os.R_OK|os.X_OK):
raise CommandError("No permissions to access WP root '%s'" % wp_root)
wp_cron = os.path.join(wp_root, 'wp-cron.php')
self._message("WP cron '%s'" % wp_cron, line_char='', verbosity=verbosity)
if not os.path.isfile(wp_cron):
raise CommandError("'%s' not found" % wp_cron)
if not os.access(wp_cron, os.R_OK):
raise CommandError("'%s' not readable" % wp_cron)
cwd = os.getcwd()
        def _partial_read(proc, stdout, stderr):
while (select.select([proc.stdout],[],[],0)[0]!=[]):
stdout += proc.stdout.read(1)
while (select.select([proc.stderr],[],[],0)[0]!=[]):
stderr += proc.stderr.read(1)
try:
os.chdir(wp_root)
for b in blogs:
path = b.path or '/'
self._message("blog %s: setting domain as '%s' and path as '%s'" % (b.blog_id, b.domain, path), line_char='', verbosity=verbosity)
os.environ.update(dict(HTTP_HOST=b.domain, REQUEST_URI=path+'wp-cron.php'))
try:
p = subprocess.Popen(["php", "-f", "wp-cron.php"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError, e:
raise CommandError("process error for blog %s, %s" % (b.blog_id, e))
continue
#stoud, stderr = '', ''
#while not p.poll():
# _partial_read(p, stdout, stderr)
#stdout = p.stdout.read()
if stdout:
print "output for blog %s:" % b.blog_id
print stdout
#stderr = p.stderr.read()
if stderr:
print "error for blog %s:" % b.blog_id
print stderr
retcode = p.returncode
if retcode:
print "non-zero returncode for blog %s: %s" % (b.blog_id, retcode)
if stdout or stderr or retcode:
print
finally:
os.chdir(cwd)
if opts['revisions']:
self._message("Removing revisions", 'pre', '=', verbosity=verbosity)
for b in blogs:
num = cursor.execute(
"delete from %sposts where post_type='revision' and post_status='inherit' and post_date <= now() - interval 2 day" % b.db_prefix
)
connection._commit()
self._message("blog %s: %s old revisions removed" % (b.blog_id, num), line_char='', verbosity=verbosity)
if opts['future']:
self._message("Publishing future posts", 'pre', '=', verbosity=verbosity)
for b in blogs:
num = cursor.execute(
"select ID, post_date, post_author from %sposts where post_type='post' and post_status='future' and post_date <= now()" % b.db_prefix
)
message = "blog %s: no future posts found" % b.blog_id
if num:
rows = cursor.fetchall()
num = cursor.execute(
"update %sposts set post_status='publish' where post_type='post' and post_status='future' and post_date <= now()" % b.db_prefix
)
if num:
timestamp = time.time()
for post_id, post_date, author_id in rows:
d = dict(id=post_id, date=post_date.strftime('%Y-%m-%d %H:%M:%S'), author_id=author_id)
cursor.execute("""
select t.term_taxonomy_id, t.taxonomy
from %sterm_relationships r
inner join %sterm_taxonomy t on t.term_taxonomy_id=r.term_taxonomy_id
where r.object_id=%%s
""" % (b.db_prefix, b.db_prefix), (post_id,))
d['taxonomy'] = dict((None, dict(zip(('id', 'taxonomy'), r))) for r in cursor.fetchall())
cache_timestamps(b.blog_id, 'post', d, timestamp)
message = "blog %s: %s future posts published" % (b.blog_id, num)
else:
message = "blog %s: no future posts published" % b.blog_id
connection._commit()
self._message(message, line_char='', verbosity=verbosity)
if opts['cache']:
cache_dir = getattr(cache, '_dir', None)
if not cache_dir:
raise CommandError("Cache backend %s does not support purging." % cache.__class__.__name__)
if not os.path.isdir(cache_dir):
raise CommandError("Cache directory %s not found." % cache_dir)
if not os.access(cache_dir, os.R_OK|os.W_OK|os.X_OK):
raise CommandError("Cannot access cache directory %s." % cache_dir)
self._message("Removing stale cache files", 'pre', '=', verbosity=verbosity)
limit = time.time() - settings.CACHE_MIDDLEWARE_SECONDS
checked = purged = 0
for dirpath, dirnames, filenames in os.walk(cache_dir):
for fname in filenames:
|
MDU-PHL/ngmaster
|
setup.py
|
Python
|
gpl-2.0
| 1,595
| 0.000627
|
from setuptools import setup
from ngmaster import __version__
def readme():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(name='ngmaster',
version=__version__,
description='In silico multi-antigen sequence typing for Neisseria gonorrhoeae (NG-MAST)',
long_description=readme(),
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Intended Audience :: Science/Research',
],
keywords='microbial genomics Neisseria sequence typing',
url='https://github.com/MDU-PHL/ngmaster',
author='Jason Kwong',
author_email='kwongj@gmail.com',
license='GPLv3',
packages=['ngmaster'],
python_requires='>=3.6',
install_requires=[
'argparse',
'biopython',
'bs4',
'requests',
],
test_suite='nose.collector',
tests_require=[],
extras_require={
'dev': [
'bumpversion'
]
},
entry_points={
'console_scripts': ['ngmaster=ngmaster.run_ngmaster:main'],
},
include_package_data=True,
zip_safe=False)
|
daringer/pyORM
|
tests/field_ex_test.py
|
Python
|
gpl-2.0
| 1,954
| 0.013818
|
import os, sys
import time
import unittest
import operator as ops
sys.path.append("..")
from baserecord import BaseRecord
from fields import StringField, IntegerField, DateTimeField, \
OneToManyRelation, FloatField, OptionField, ManyToOneRelation, \
ManyToManyRelation
from core import Database
from field_expression import FieldExpression
class FieldExpressionTestSuite(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_simple_exp_add(self):
x, y = 1, 2
for o in [ops.add, ops.sub, ops.and_, ops.or_, ops.eq, ops.lt, ops.le]:
f1 = FieldExpression(x, y, o)
self.assertTrue(f1.eval() == o(x, y), "failed: {}, ref: {}". \
format(o.__name__, o(x, y)))
def test_partly_evaluation(self):
x, y = 123, "varname"
f1 = FieldExpression(x, y, ops.add)
f2 = f1.eval()
self.assertTrue(isinstance(f1.eval(), FieldExpression))
self.assertTrue(f1.to_string() == "123 + varname")
f1.context["varname"] = 313
self.assertTrue(isinstance(f1.eval(), int))
self.assertTrue(f1.eval() == 436)
def test_multi_level(self):
x, y, z = 423, 324, 321
f1 = FieldExpression(x, y, ops.add)
f2 = FieldExpression(y, z, ops.add)
f3 = FieldExpression(f1, f2, ops.sub)
self.assertTrue( f3.eval() == ops.sub(ops.add(x, y), ops.add(y, z)) )
self.assertTrue( f3.to_string() == "(423 + 324) - (324 + 321)" )
def test_direct_operator_use(self):
x, y, z = 423, 324, 321
res = "(((423 + 324) + (324 + 321)) - (423 + 324)) + (324 + 321)"
f1 = FieldExpression(x, y, ops.add)
f2 = FieldExpression(y, z, ops.add)
f3 = ( ( f1 + f2 ) - f1) + f2
self.assertTrue( f3.eval() == 1290 )
self.assertTrue( f3.to_string() == res )
if __name__ == '__main__':
unittest.main()
|
miumok98/weblate
|
weblate/accounts/management/commands/changesite.py
|
Python
|
gpl-3.0
| 2,417
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from optparse import make_option
class Command(BaseCommand):
help = 'changes default site name'
option_list = BaseCommand.option_list + (
make_option(
'--set-name',
type='str',
dest='set_name',
default=None,
help='site name to set'
),
make_option(
'--site-id',
type='int',
dest='site_id',
default=1,
help='site ID to manipulate (1 by default)'
),
make_option(
'--get-name',
action='store_true',
dest='get_name',
default=False,
help='just display the site name'
),
)
def handle(self, *args, **options):
if options['set_name']:
site, created = Site.objects.get_or_create(
pk=options['site_id'],
defaults={
'domain': options['set_name'],
'name': options['set_name']
}
)
if not created:
site.domain = options['set_name']
site.name = options['set_name']
site.save()
elif options['get_name']:
try:
site = Site.objects.get(pk=options['site_id'])
self.stdout.write(site.domain)
except Site.DoesNotExist:
raise CommandError('Site does not exist!')
else:
raise CommandError('Please specify desired action!')
|
Distrotech/bzr
|
bzrlib/tests/fake_command.py
|
Python
|
gpl-2.0
| 859
| 0
|
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from bzrlib.tests import test_commands
test_commands.lazy_command_imported = True
class cmd_fake(object):
pass
|
google-coral/project-keyword-spotter
|
mel_features.py
|
Python
|
apache-2.0
| 9,761
| 0.004815
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines routines to compute mel spectrogram features from audio waveform."""
import numpy as np
def frame(data, window_length, hop_length):
"""Convert array into a sequence of successive possibly overlapping frames.
An n-dimensional array of shape (num_samples, ...) is converted into an
(n+1)-D array of shape (num_frames, window_length, ...), where each frame
starts hop_length points after the preceding one.
This is accomplished using stride_tricks, so the original data is not
copied. However, there is no zero-padding, so any incomplete frames at the
end are not included.
Args:
data: np.array of dimension N >= 1.
window_length: Number of samples in each frame.
hop_length: Advance (in samples) between each window.
Returns:
(N+1)-D np.array with as many rows as there are complete frames that can be
extracted.
"""
num_samples = data.shape[0]
num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length))
shape = (num_frames, window_length) + data.shape[1:]
strides = (data.strides[0] * hop_length,) + data.strides
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
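# Illustrative usage sketch (not part of the original module): framing a toy
# signal shows the hop overlap and the dropped incomplete tail.
#   >>> frame(np.arange(10), window_length=4, hop_length=2).shape
#   (4, 4)
#   >>> frame(np.arange(10), window_length=4, hop_length=2)[1]
#   array([2, 3, 4, 5])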
def periodic_hann(window_length):
"""Calculate a "periodic" Hann window.
The classic Hann window is defined as a raised cosine that starts and
ends on zero, and where every value appears twice, except the middle
point for an odd-length window. Matlab calls this a "symmetric" window
and np.hanning() returns it. However, for Fourier analysis, this
actually represents just over one cycle of a period N-1 cosine, and
thus is not compactly expressed on a length-N Fourier basis. Instead,
it's better to use a raised cosine that ends just before the final
zero value - i.e. a complete cycle of a period-N cosine. Matlab
calls this a "periodic" window. This routine calculates it.
Args:
window_length: The number of points in the returned window.
Returns:
A 1D np.array containing the periodic hann window.
"""
return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *
np.arange(window_length)))
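# Illustrative example (not part of the original module): the periodic window
# completes exactly one cosine period, so it rises from zero but never returns
# to zero at its final sample, unlike np.hanning.
#   >>> periodic_hann(4)
#   array([0. , 0.5, 1. , 0.5])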
def stft_magnitude(signal, fft_length,
hop_length=None,
window_length=None):
"""Calculate the short-time Fourier transform magnitude.
Args:
signal: 1D np.array of the input time-domain signal.
fft_length: Size of the FFT to apply.
hop_length: Advance (in samples) between each frame passed to FFT.
window_length: Length of each block of samples to pass to FFT.
Returns:
2D np.array where each row contains the magnitudes of the fft_length/2+1
unique values of the FFT for the corresponding frame of input samples.
"""
frames = frame(signal, window_length, hop_length)
# Apply frame window to each frame. We use a periodic Hann (cosine of period
# window_length) instead of the symmetric Hann of np.hanning (period
# window_length-1).
window = periodic_hann(window_length)
windowed_frames = frames * window
return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))
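# Illustrative example (not part of the original module): a 32-sample signal
# with an 8-point FFT and hop 4 gives 1 + (32 - 8) // 4 = 7 frames, each with
# 8 // 2 + 1 = 5 magnitude bins.
#   >>> stft_magnitude(np.ones(32), fft_length=8, hop_length=4, window_length=8).shape
#   (7, 5)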
# Mel spectrum constants and functions.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def hertz_to_mel(frequencies_hertz):
"""Convert frequencies to mel
|
scale using HTK formula.
Args:
frequencies_hertz: Scalar or np.array of frequencies in hertz.
Returns:
Object of same size as frequencies_hertz containing corresponding values
on the mel scale.
"""
return _MEL_HIGH_FREQUENCY_Q * np.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
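# Worked example (illustrative): hertz_to_mel(1000.0) = 1127 * ln(1 + 1000/700)
# ~= 1127 * 0.8873 ~= 1000, reproducing the classic property that 1000 Hz maps
# to roughly 1000 mel under the HTK formula.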
def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Returns a np.array matrix A that can be used to post-multiply a matrix S of
spectrogram values (STFT magnitudes) arranged as frames x bins to generate a
"mel spectrogram" M of frames x num_mel_bins. M = S A.
The classic HTK algorithm exploits the complementarity of adjacent mel bands
to multiply each FFT bin by only one mel weight, then add it, with positive
and negative signs, to the two adjacent mel bands to which that bin
contributes. Here, by expressing this operation as a matrix multiply, we go
from num_fft multiplies per frame (plus around 2*num_fft adds) to around
num_fft^2 multiplies and adds. However, because these are all presumably
accomplished in a single call to np.dot(), it's not clear which approach is
faster in Python. The matrix multiplication has the attraction of being more
general and flexible, and much easier to read.
Args:
num_mel_bins: How many bands in the resulting mel spectrum. This is
the number of columns in the output matrix.
num_spectrogram_bins: How many bins there are in the source spectrogram
data, which is understood to be fft_size/2 + 1, i.e. the spectrogram
only contains the nonredundant FFT bins.
audio_sample_rate: Samples per second of the audio at the input to the
spectrogram. We need this to figure out the actual frequencies for
each spectrogram bin, which dictates how they are mapped into mel.
lower_edge_hertz: Lower bound on the frequencies to be included in the mel
spectrum. This corresponds to the lower edge of the lowest triangular
band.
upper_edge_hertz: The desired top edge of the highest frequency band.
Returns:
An np.array with shape (num_spectrogram_bins, num_mel_bins).
Raises:
ValueError: if frequency edges are incorrectly ordered or out of range.
"""
nyquist_hertz = audio_sample_rate / 2.
if lower_edge_hertz < 0.0:
raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > nyquist_hertz:
raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" %
(upper_edge_hertz, nyquist_hertz))
spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)
spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)
# The i'th mel band (starting from i=1) has center frequency
# band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge
# band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in
# the band_edges_mel arrays.
band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),
hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)
# Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins
# of spectrogram values.
mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))
for i in range(num_mel_bins):
lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the *mel* domain, not hertz.
lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /
(center_mel - lower_edge_mel))
upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /
(upper_edge_mel - center_mel))
# .. then intersect them with each other and zero.
mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope, upper_slope))
# HTK excludes the spectrogram DC bin; force a zero coefficient for it.
mel_weights_matrix[0, :] = 0.0
return mel_weights_matrix
|
codendev/rapidwsgi
|
src/mako/codegen.py
|
Python
|
gpl-3.0
| 39,495
| 0.007368
|
# codegen.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters
MAGIC_NUMBER = 5
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not util.py3k and isinstance(source_encoding, unicode):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode),
node)
return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, parsetree.DefTag)
if self.in_def:
name = "render_" + node.name
args = node.function_decl.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
encoding =[None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline(
"_template_filename=%r" % self.compiler.filename)
self.printer.writeline("_template_uri=%r" % self.compiler.uri)
self.printer.writeline(
"_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline(
"_source_encoding=%r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
|
marcio-curl/EPUB-plasTeX
|
pacotes/amsmath.py
|
Python
|
lgpl-3.0
| 2,208
| 0.018569
|
#!/usr/bin/env python
from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX.Arrays import Array
from plasTeX.Base.LaTeX.Math import EqnarrayStar, equation, eqnarray, MathEnvironment
#### Imports Added by Tim ####
from plasTeX.Base.LaTeX.Math import math
class pmatrix(Array):
pass
class _AMSEquation(eqnarray):
pass
class _AMSEquationStar(EqnarrayStar):
macroName = None
class align(_AMSEquation):
pass
class AlignStar(_AMSEquationStar):
macroName = 'align*'
class gather(_AMSEquation):
pass
class GatherStar(_AMSEquationStar):
macroName = 'gather*'
class falign(_AMSEquation):
pass
class FAlignStar(_AMSEquationStar):
macroName = 'falign*'
class multiline(_AMSEquation):
pass
class MultilineStar(_AMSEquationStar):
macroName = 'multiline*'
class alignat(_AMSEquation):
pass
class AlignatStar(_AMSEquationStar):
macroName = 'alignat*'
class split(_AMSEquation):
pass
#### Added by Tim ####
class EquationStar(_AMSEquationStar):
macroName = 'equation*'
class aligned(_AMSEquation):
pass
class cases(_AMSEquation):
pass
class alignat(_AMSEquation):
args = 'column:int'
class AlignatStar(_AMSEquationStar):
args = 'column:int'
macroName = 'alignat*'
class flalign(_AMSEquation):
pass
class FlalignStar(_AMSEquationStar):
macroName = 'flalign*'
class subequations(_AMSEquation):
pass
class xalignat(alignat):
pass
class multline(multiline):
pass
class MultlineStar(MultilineStar):
macroName = 'multline*'
class matrix(Array):
pass
class vmatrix(Array):
pass
class Vmatrix(Array):
pass
class bmatrix(Array):
pass
class Bmatrix(Array):
pass
#### Inline Math
class smallmatrix(math):
pass
class dddot(math):
pass
class ddddot(math):
pass
class dots(Command):
pass
# "Transforma" o equation num displaymath
class equation(equation):
blockType = True
@property
def source(self):
if self.hasChildNodes():
return r'\[ %s \]' % sourceChildren(self)
if self.macroMode == Command.MODE_END:
return r'\]'
return r'\['
class eqref(Command):
args = 'label:idref'
|
yannrouillard/weboob
|
modules/lutim/browser.py
|
Python
|
agpl-3.0
| 1,503
| 0.000665
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Vincent A
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser
from StringIO import StringIO
import re
from .pages import PageAll
__all__ = ['LutimBrowser']
class LutimBrowser(BaseBrowser):
ENCODING = 'utf-8'
def __init__(self, base_url, *args, **kw):
BaseBrowser.__init__(self, *args, **kw)
self.base_url = base_url
self.PAGES = {re.escape(self.base_url): PageAll}
def post(self, name, content, max_days):
self.location(self.base_url)
assert self.is_on_page(PageAll)
self.select_form(nr=0)
self.form['delete-day'] = [str(max_days)]
self.form.find_control('file').add_file(StringIO(content), filename=name)
self.submit()
assert self.is_on_page(PageAll)
return self.page.get_info()
|
TomBaxter/osf.io
|
admin/common_auth/admin.py
|
Python
|
apache-2.0
| 2,590
| 0.001544
|
from __future__ import absolute_import
from django.contrib import admin
from django.contrib.admin.models import DELETION
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.utils.html import escape
from osf.models import AdminLogEntry
from osf.models import AdminProfile
class PermissionAdmin(admin.ModelAdmin):
search_fields = ['name', 'codename']
class AdminAdmin(admin.ModelAdmin):
def permission_groups(self):
perm_groups = ', '.join(
[perm.name for perm in self.user.groups.all()]) if self.user.groups.all() else 'No permission groups'
return u'<a href="/account/register/?id={id}">{groups}</a>'.format(id=self.user._id, groups=perm_groups)
def user_name(self):
return self.user.username
def _id(self):
return self.user._id
permission_groups.allow_tags = True
permission_groups.short_description = 'Permission Groups'
list_display = [user_name, _id, permission_groups]
admin.site.register(Permission, PermissionAdmin)
admin.site.register(AdminProfile, AdminAdmin)
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
readonly_fields = [f.name for f in AdminLogEntry._meta.get_fields()]
list_filter = [
'user',
'action_flag'
]
search_fields = [
'object_repr',
'change_message'
]
list_display = [
'action_time',
'user',
'object_link',
'object_id',
'message',
]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return request.user.is_superuser and request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def object_link(self, obj):
if obj.action_flag == DELETION:
link = escape(obj.object_repr)
elif obj.content_type is None:
link = escape(obj.object_repr)
else:
ct = obj.content_type
link = u'<a href="%s">%s</a>' % (
reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id]),
escape(obj.object_repr),
)
return link
object_link.allow_tags = True
object_link.admin_order_field = 'object_repr'
object_link.short_description = u'object'
def queryset(self, request):
return super(LogEntryAdmin, self).queryset(request) \
.prefetch_related('content_type')
# admin.site.register(AdminLogEntry, LogEntryAdmin)
|
baixuexue123/note
|
python/others/serialport/camera.py
|
Python
|
bsd-2-clause
| 3,086
| 0.010313
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"debug camera"
__author__ = "baixue"
from ctypes import *
import os, sys
from time import sleep
from binascii import unhexlify
import serial
import serial.tools.list_ports
DIR_ROOT = os.getcwd()
HEADER = 'AABB'
ADDR = '01'
RELAY_NO = ['%02d' % (i+1) for i in range(16)]
# config camera info
GROUP_A = (('01', '02'),
((12461150, 'L1'), (12442173, 'R1')),
os.path.join(DIR_ROOT, u'A组相机'.encode('gbk')),  # u'A组相机' = "Group A cameras"
u'第一组相机')  # "camera group 1"
GROUP_B = (('03', '04'),
((12461130, 'L1'), (12492601, 'R1')),
os.path.join(DIR_ROOT, u'B组相机'.encode('gbk')),  # "Group B cameras"
u'第二组相机')  # "camera group 2"
GROUP_C = (('05',),
((12461145, 'L1'), (13020874, 'R1')),
os.path.join(DIR_ROOT, u'C组相机'.encode('gbk')),  # "Group C cameras"
u'第三组相机')  # "camera group 3"
CAMERA = (GROUP_A, GROUP_B, GROUP_C)
dll = cdll.LoadLibrary('GrabImage32_dll.dll')
def GrabImage(serialNum, path, imageName, shutter=47.574, gain=0.0, sleepTime=0, numImages=1):
serialNum = c_uint(serialNum)
strPath = c_char_p(path)
imageName = c_char_p(imageName)
shutter = c_float(shutter)
gain = c_float(gain)
sleepTime = c_int(sleepTime)
numImages = c_int(numImages)
ret = dll.GrabImage(serialNum, strPath, imageName, shutter, gain, sleepTime, numImages)
return ret
def create_dir():
for group in CAMERA:
image_dir = group[2]
if not os.path.exists(image_dir):
os.mkdir(image_dir)
def getPortList():
port_list = list(serial.tools.list_ports.comports())
if len(port_list) <= 0:
# the serial port can't find!
return 0
else:return [i[0] for i in port_list]
def generate_relay_cmd(relayNo, state=False):
if relayNo in RELAY_NO:
cmd = ''.join((HEADER, ADDR, relayNo, '01' if state else '00'))
elif relayNo == 'AllOpen':
cmd = ''.join((HEADER, ADDR, '1A01'))
elif relayNo == 'AllClose':
cmd = ''.join((HEADER, ADDR, '1C01'))
else:raise TypeError('parameter-relayNo is not correct')
chks = '%02x' % (sum(bytearray(unhexlify(cmd)))%256)
return cmd+chks
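# Worked example (illustrative): for relay '01' switched on, the frame is
# HEADER + ADDR + relay + state = 'AABB010101'; its byte sum is
# 0xAA + 0xBB + 0x01 + 0x01 + 0x01 = 360 and 360 % 256 = 0x68, so
# generate_relay_cmd('01', True) returns 'AABB01010168'.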
def main():
# create dir if non_exist
create_dir()
# check serial port
if getPortList() == 0:
print u'没有可用串口'  # "no serial port available"
raw_input('按ENTER键退出...'.decode('utf-8').encode('gbk'))  # "press ENTER to exit..."
return None
com = serial.Serial('com2', 9600, timeout=5)
com.flushInput()
com.flushOutput()
com.write(unhexlify(generate_relay_cmd('AllClose')))
for group in CAMERA:
print group[3]
for light in group[0]:
com.write(unhexlify(generate_relay_cmd(light, True)))
sleep(0.5)
for camera in group[1]:
ret = GrabImage(camera[0], group[2], camera[1])
for light in group[0]:
com.write(unhexlify(generate_relay_cmd(light, False)))
print
com.write(unhexlify(generate_relay_cmd('AllClose')))
com.close()
print u'完成!'  # "Done!"
raw_input('按ENTER键退出...'.decode('utf-8').encode('gbk'))  # "press ENTER to exit..."
return None
if __name__ == "__main__":
main()
sys.exit()
|
dawran6/flask
|
tests/test_views.py
|
Python
|
bsd-3-clause
| 6,085
| 0
|
# -*- coding: utf-8 -*-
"""
tests.views
~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import flask.views
from werkzeug.http import parse_set_header
def common_test(app):
c = app.test_client()
assert c.get('/').data == b'GET'
assert c.post('/').data == b'POST'
assert c.put('/').status_code == 405
meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
assert sorted(meths) == ['GET', 'HEAD', 'OPTIONS', 'POST']
def test_basic_view(app):
class Index(flask.views.View):
methods = ['GET', 'POST']
def dispatch_request(self):
return flask.request.method
app.add_url_rule('/', view_func=Index.as_view('index'))
common_test(app)
def test_method_based_view(app):
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def post(self):
return 'POST'
app.add_url_rule('/', view_func=Index.as_view('index'))
common_test(app)
def test_view_patching(app):
class Index(flask.views.MethodView):
def get(self):
1 // 0
def post(self):
1 // 0
class Other(Index):
def get(self):
return 'GET'
def post(self):
return 'POST'
view = Index.as_view('index')
view.view_class = Other
app.add_url_rule('/', view_func=view)
common_test(app)
def test_view_inheritance(app):
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def post(self):
return 'POST'
class BetterIndex(Index):
def delete(self):
return 'DELETE'
app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
c = app.test_client()
meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
assert sorted(meths) == ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST']
def test_view_decorators(app):
def add_x_parachute(f):
def new_function(*args, **kwargs):
resp = flask.make_response(f(*args, **kwargs))
resp.headers['X-Parachute'] = 'awesome'
return resp
return new_function
class Index(flask.views.View):
decorators = [add_x_parachute]
def dispatch_request(self):
return 'Awesome'
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
assert rv.headers['X-Parachute'] == 'awesome'
assert rv.data == b'Awesome'
def test_view_provide_automatic_options_attr():
app = flask.Flask(__name__)
class Index1(flask.views.View):
provide_automatic_options = False
def dispatch_request(self):
return 'Hello World!'
app.add_url_rule('/', view_func=Index1.as_view('index'))
c = app.test_client()
rv = c.open('/', method='OPTIONS')
assert rv.status_code == 405
app = flask.Flask(__name__)
class Index2(flask.views.View):
methods = ['OPTIONS']
provide_automatic_options = True
def dispatch_request(self):
return 'Hello World!'
app.add_url_rule('/', view_func=Index2.as_view('index'))
c = app.test_client()
rv = c.open('/', method='OPTIONS')
assert sorted(rv.allow) == ['OPTIONS']
app = flask.Flask(__name__)
class Index3(flask.views.View):
def dispatch_request(self):
return 'Hello World!'
app.add_url_rule('/', view_func=Index3.as_view('index'))
c = app.test_client()
rv = c.open('/', method='OPTIONS')
assert 'OPTIONS' in rv.allow
def test_implicit_head(app):
class Index(flask.views.MethodView):
def get(self):
return flask.Response('Blub', headers={
'X-Method': flask.request.method
})
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Blub'
assert rv.headers['X-Method'] == 'GET'
rv = c.head('/')
assert rv.data == b''
assert rv.headers['X-Method'] == 'HEAD'
def test_explicit_head(app):
class Index(flask.views.MethodView):
def get(self):
return 'GET'
def head(self):
return flask.Response('', headers={'X-Method': 'HEAD'})
app.add_url_rule('/', view_func=Index.as_view('index'))
c = app.test_client()
rv = c.get('/')
assert rv.data == b'GET'
rv = c.head('/')
assert rv.data == b''
assert rv.headers['X-Method'] == 'HEAD'
def test_endpoint_override(app):
app.debug = True
class Index(flask.views.View):
methods = ['GET', 'POST']
def dispatch_request(self):
return flask.request.method
app.add_url_rule('/', view_func=Index.as_view('index'))
with pytest.raises(AssertionError):
app.add_url_rule('/', view_func=Index.as_view('index'))
# But these tests should still pass. We just log a warning.
common_test(app)
def test_multiple_inheritance(app):
class GetView(flask.views.MethodView):
def get(self):
return 'GET'
class DeleteView(flask.views.MethodView):
def delete(self):
return 'DELETE'
class GetDeleteView(GetView, DeleteView):
pass
app.add_url_rule('/', view_func=GetDeleteView.as_view('index'))
c = app.test_client()
assert c.get('/').data == b'GET'
assert c.delete('/').data == b'DELETE'
assert sorted(GetDeleteView.methods) == ['DELETE', 'GET']
def test_remove_method_from_parent(app):
class GetView(flask.views.MethodView):
def get(self):
return 'GET'
class OtherView(flask.views.MethodView):
def post(self):
return 'POST'
class View(GetView, OtherView):
methods = ['GET']
app.add_url_rule('/', view_func=View.as_view('index'))
c = app.test_client()
assert c.get('/').data == b'GET'
assert c.post('/').status_code == 405
assert sorted(View.methods) == ['GET']
|
lmaycotte/quark
|
quark/db/migration/alembic/cli.py
|
Python
|
apache-2.0
| 7,166
| 0
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import environment
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
HEAD_FILENAME = 'HEAD'
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_core_opts)
CONF.register_cli_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(six.text_type(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_head_file(config)
def add_alembic_subparser(sub, cmd):
return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision or ''
if '-' in revision:
raise SystemExit(_('Negative relative revision (downgrade) not '
'supported'))
delta = CONF.command.delta
if delta:
if '+' in revision:
raise SystemExit(_('Use either --delta or relative revision, '
'not both'))
if delta < 0:
raise SystemExit(_('Negative delta (downgrade) not supported'))
revision = '%s+%d' % (revision, delta)
if not CONF.command.sql:
run_sanity_checks(config, revision)
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
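# Illustrative example (assumed revisions): `upgrade --delta 2` with no
# revision yields the alembic target '+2', while `upgrade abc123 --delta 2`
# yields 'abc123+2'; mixing an explicit '+N' revision with --delta is rejected.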
def no_downgrade(config, cmd):
raise SystemExit(_("Downgrade no longer supported"))
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_head_file(config)
def validate_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
if (os.path.isfile(head_path) and
open(head_path).read().strip() == script.get_current_head()):
return
else:
alembic_util.err(_('HEAD file does not match migration timeline head'))
def update_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
with open(head_path, 'w+') as f:
f.write(script.get_current_head())
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = add_alembic_subparser(subparsers, name)
parser.set_defaults(func=do_alembic_command)
help_text = (getattr(alembic_command, 'branches').__doc__ +
' and validate head file')
parser = subparsers.add_parser('check_migration', help=help_text)
parser.set_defaults(func=do_check_migration)
parser = add_alembic_subparser(subparsers, 'upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.add_argument('--mysql-engine',
default='',
help='Change MySQL storage engine of current '
'existing tables')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('downgrade', help="(No longer supported)")
parser.add_argument('None', nargs='?', help="Downgrade not supported")
parser.set_defaults(func=no_downgrade)
parser = add_alembic_subparser(subparsers, 'stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = add_alembic_subparser(subparsers, 'revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def get_script_location(neutron_config):
location = '%s.db.migration:alembic_migrations'
base = "neutron"
return location % base
def run_sanity_checks(config, revision):
script_dir = alembic_script.ScriptDirectory.from_config(config)
def check_sanity(rev, context):
for script in script_dir.iterate_revisions(revision, rev):
if hasattr(script.module, 'check_sanity'):
script.module.check_sanity(context.connection)
return []
with environment.EnvironmentContext(config, script_dir,
fn=check_sanity,
starting_rev=None,
destination_rev=revision):
script_dir.run_env()
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), '../alembic.ini')
)
config.set_main_option('script_location',
'quark.db.migration:alembic')
# attach the Neutron conf to the Alembic conf
config.neutron_config = CONF
CONF()
# TODO(gongysh) enable logging
CONF.command.func(config, CONF.command.name)
|
SGenheden/Scripts
|
Membrane/water_leakage.py
|
Python
|
mit
| 3,011
| 0.008303
|
# Author: Samuel Genheden, samuel.genheden@gmail.com
"""
Program to calculate how many water molecules are leaking into the membrane
"""
import argparse
import math
import numpy as np
from sgenlib import parsing
from sgenlib import mol
def _count_water_inside(dens1, dens2, fi, li, fx, lx) :
return sum(dens1[fi:li])
def _count_water_inside2(dens1, dens2, fi, li, fx, lx, **kwargs) :
return sum(kwargs["watdens"][fi:li])
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="Calculate water leakage")
parser.add_argument('-f', '--file', help="the density file from g_density")
parser.add_argument('-w', '--watdens', help="the water density")
parser.add_argument('-l', '--lipdens', help="the lipid density")
parser.add_argument('-i', '--lipdens2', help="the lipid density")
parser.add_argument('-n', '--natom', type=int, help="the number of atoms in the lipid")
parser.add_argument('-t', '--natom2', type=int, help="the number of atoms in the lipid")
parser.add_argument('-a', '--area', type=float, help="membrane area")
args = parser.parse_args()
xvals, densities, ylabel = parsing.parse_densityfile(args.file)
dx = (xvals[1] - xvals[0])
lenz = xvals[-1]+dx-xvals[0]
factor = len(xvals) / (args.area * lenz)
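# Illustrative example (assumed numbers): with 100 bins spanning lenz = 10 nm
# over a membrane area of 40 nm^2, factor = 100 / (40 * 10) = 0.25 bins/nm^3;
# dividing a g_density profile (molecules/nm^3) by it gives molecules per bin,
# so the sums below recover total molecule counts.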
densities[args.watdens] /= factor
nwater = np.sum(densities[args.watdens])
print "N water = %d "%(np.round(nwater))
densities[args.lipdens] /= (factor * args.natom)
nlipid = np.sum(densities[args.lipdens])
print "N lipids = %d "%(np.round(nlipid))
densities[args.lipdens2] /= (factor * args.natom2)
nlipid = np.sum(densities[args.lipdens2])
print "N lipids = %d "%(np.round(nlipid))
midi = int(np.ceil(xvals.shape[0]/2.0))
fi, li = mol.density_intercept(densities[args.watdens], densities[args.lipdens])
(fx_std, lx_std), in_std = mol.bootstrap_density_intercept(densities[args.watdens],
densities[args.lipdens], xvals, nboots=100, user_func=_count_water_inside)
print "Water and lipid density crosses at: %.3f %.3f"%(xvals[fi], fx_std)
print "Water and lipid d
|
ensity crosses at: %.3f %.3f"%(xv
|
als[li], lx_std)
print "\nNumber of leaked water: %d %d"%(_count_water_inside(
densities[args.watdens], densities[args.lipdens],
fi, li, xvals[fi], xvals[li]),
in_std)
fi, li = mol.density_intercept(densities[args.lipdens2], densities[args.lipdens])
(fx_std, lx_std), in_std = mol.bootstrap_density_intercept(densities[args.lipdens2],
densities[args.lipdens], xvals, nboots=100, user_func=_count_water_inside2,
watdens=densities[args.watdens])
print "\nLipid densities crosses at: %.3f %.3f"%(xvals[fi], fx_std)
print "Lipid densities crosses at: %.3f %.3f"%(xvals[li], lx_std)
print "\nNumber of leaked2 water: %d %d"%(_count_water_inside2(None, None,
fi, li, xvals[fi], xvals[li], watdens=densities[args.watdens]),
in_std)
|
adamkh/micropython
|
tests/basics/string_format.py
|
Python
|
mit
| 4,990
| 0.001603
|
# Change the following to True to get a much more comprehensive set of tests
# to run, albeit, which take considerably longer.
full_tests = False
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
test("}}{{")
test("{}-{}", 1, [4, 5])
test("{0}-{1}", 1, [4, 5])
test("{1}-{0}", 1, [4, 5])
test("{:x}", 1)
test("{!r}", 2)
test("{:x}", 0x10)
test("{!r}", "foo")
test("{!s}", "foo")
test("{0!r:>10s} {0!s:>10s}", "foo")
test("{:4b}", 10)
test("{:4c}", 48)
test("{:4d}", 123)
test("{:4n}", 123)
test("{:4o}", 123)
test("{:4x}", 123)
test("{:4X}", 123)
test("{:4,d}", 12345678)
test("{:#4b}", 10)
test("{:#4o}", 123)
test("{:#4x}", 123)
test("{:#4X}", 123)
test("{:#4d}", 0)
test("{:#4b}", 0)
test("{:#4o}", 0)
test("{:#4x}", 0)
test("{:#4X}", 0)
test("{:<6s}", "ab")
test("{:>6s}", "ab")
test("{:^6s}", "ab")
test("{:.1s}", "ab")
test("{: <6d}", 123)
test("{: <6d}", -123)
test("{:0<6d}", 123)
test("{:0<6d}", -123)
test("{:@<6d}", 123)
test("{:@<6d}", -123)
test("{:@< 6d}", 123)
test("{:@< 6d}", -123)
test("{:@<+6d}", 123)
test("{:@<+6d}", -123)
test("{:@<-6d}", 123)
test("{:@<-6d}", -123)
test("{:@>6d}", -123)
test("{:@<6d}", -123)
test("{:@=6d}", -123)
test("{:06d}", -123)
test("{:>20}", "foo")
test("{:^20}", "foo")
test("{:<20}", "foo")
# nested format specifiers
print("{:{}}".format(123, '#>10'))
print("{:{}{}{}}".format(123, '#', '>', '10'))
print("{0:{1}{2}}".format(123, '#>', '10'))
print("{text:{align}{width}}".format(text="foo", align="<", width=20))
print("{text:{align}{width}}".format(text="foo", align="^", widt
|
h=10))
print("{text:{align}{width}}".format(text="foo", align=">", width=30))
print("{foo}/foo".format(foo="bar"))
print("{}".format(123, foo="bar"))
print("{}-{foo}".format(123, foo="bar"))
def test_fmt(conv, fill, alignment, sign, prefix, width, precision, type, arg):
fmt = '{'
if conv:
fmt += '!'
fmt += conv
fmt += ':'
if alignment:
fmt += fill
fmt += alignment
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
if fill == '0' and alignment == '=':
fmt = '{:'
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
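# Illustrative example (not part of the original test): the call
# test_fmt('', '@', '>', '+', '#', '6', '', 'x', 123) assembles the spec
# '{:@>+#6x}', which renders 123 as '@+0x7b' (hex with prefix and sign,
# right-aligned to width 6 with '@' fill).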
int_nums = (-1234, -123, -12, -1, 0, 1, 12, 123, 1234, True, False)
int_nums2 = (-12, -1, 0, 1, 12, True, False)
if full_tests:
for type in ('', 'b', 'd', 'o', 'x', 'X'):
for width in ('', '1', '3', '5', '7'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
for prefix in ('', '#'):
for num in int_nums:
test_fmt('', fill, alignment, sign, prefix, width, '', type, num)
if full_tests:
for width in ('', '1', '2'):
for alignment in ('', '<', '>', '^'):
for fill in ('', ' ', '0', '@'):
test_fmt('', fill, alignment, '', '', width, '', 'c', 48)
if full_tests:
for conv in ('', 'r', 's'):
for width in ('', '1', '4', '10'):
for alignment in ('', '<', '>', '^'):
for fill in ('', ' ', '0', '@'):
for str in ('', 'a', 'bcd', 'This is a test with a longer string'):
test_fmt(conv, fill, alignment, '', '', width, '', 's', str)
# tests for errors in format string
try:
'{0:0}'.format('zzz')
except (ValueError):
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'}'.format('zzzz')
except ValueError:
print('ValueError')
# end of format parsing conversion specifier
try:
'{!'.format('a')
except ValueError:
print('ValueError')
# unknown conversion specifier
try:
'abc{!d}'.format('1')
except ValueError:
print('ValueError')
try:
'{abc'.format('zzzz')
except ValueError:
print('ValueError')
# expected ':' after specifier
try:
'{!s :}'.format(2)
except ValueError:
print('ValueError')
try:
'{}{0}'.format(1, 2)
except ValueError:
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'{ 0 :*^10}'.format(12)
except KeyError:
print('KeyError')
try:
'{0}{}'.format(1)
except ValueError:
print('ValueError')
try:
'{}{}'.format(1)
except IndexError:
print('IndexError')
try:
'{0:+s}'.format('1')
except ValueError:
print('ValueError')
try:
'{0:+c}'.format(1)
except ValueError:
print('ValueError')
try:
'{0:s}'.format(1)
except ValueError:
print('ValueError')
try:
'{:*"1"}'.format('zz')
except ValueError:
print('ValueError')
# unknown format code for str arg
try:
'{:X}'.format('zz')
except ValueError:
print('ValueError')
|
synergeticsedx/deployment-wipro
|
lms/djangoapps/branding/tests/test_api.py
|
Python
|
agpl-3.0
| 4,864
| 0.005345
|
# encoding: utf-8
"""Tests of Branding API """
from __future__ import unicode_literals
from django.test import TestCase
import mock
from branding.api import get_logo_url, get_footer
from django.test.utils import override_settings
class TestHeader(TestCase):
"""Test API end-point for retrieving the header. """
def test_cdn_urls_for_logo(self):
# Ordinarily, we'd use `override_settings()` to override STATIC_URL,
# which is what the staticfiles storage backend is using to construct the URL.
# Unfortunately, other parts of the system are caching this value on module
# load, which can cause other tests to fail. To ensure that this change
# doesn't affect other tests, we patch the `url()` method directly instead.
cdn_url = "http://cdn.example.com/static/image.png"
with mock.patch('branding.api.staticfiles_storage.url', return_value=cdn_url):
logo_url = get_logo_url()
self.assertEqual(logo_url, cdn_url)
class TestFooter(TestCase):
"""Test retrieving the footer. """
@mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True})
@mock.patch.dict('django.conf.settings.MKTG_URLS', {
"ROOT": "https://edx.org",
"ABOUT": "/about-us",
"NEWS": "/news-announcements",
"CONTACT": "/contact",
"FAQ": "/student-faq",
"BLO
|
G": "/edx-blog",
"DONATE": "/donate",
"JOBS": "/jobs",
"SITE_MAP": "/sitemap",
"TOS_AND_HONOR": "/edx-terms-service",
"PRIVACY": "/e
|
dx-privacy-policy",
"ACCESSIBILITY": "/accessibility",
"MEDIA_KIT": "/media-kit",
"ENTERPRISE": "/enterprise"
})
@override_settings(PLATFORM_NAME='\xe9dX')
def test_get_footer(self):
actual_footer = get_footer(is_secure=True)
expected_footer = {
'copyright': '\xa9 \xe9dX. All rights reserved except where noted. EdX, Open edX and the edX and Open'
' EdX logos are registered trademarks or trademarks of edX Inc.',
'navigation_links': [
{'url': 'https://edx.org/about-us', 'name': 'about', 'title': 'About'},
{'url': 'https://edx.org/enterprise', 'name': 'enterprise', 'title': '\xe9dX for Business'},
{'url': 'https://edx.org/edx-blog', 'name': 'blog', 'title': 'Blog'},
{'url': 'https://edx.org/news-announcements', 'name': 'news', 'title': 'News'},
{'url': 'https://support.example.com', 'name': 'help-center', 'title': 'Help Center'},
{'url': 'https://edx.org/contact', 'name': 'contact', 'title': 'Contact'},
{'url': 'https://edx.org/donate', 'name': 'donate', 'title': 'Donate'}
],
'legal_links': [
{'url': 'https://edx.org/edx-terms-service',
'name': 'terms_of_service_and_honor_code',
'title': 'Terms of Service & Honor Code'},
{'url': 'https://edx.org/edx-privacy-policy', 'name': 'privacy_policy', 'title': 'Privacy Policy'},
{'url': 'https://edx.org/accessibility',
'name': 'accessibility_policy',
'title': 'Accessibility Policy'},
{'url': 'https://edx.org/sitemap', 'name': 'sitemap', 'title': 'Sitemap'},
{'url': 'https://edx.org/media-kit', 'name': 'media_kit', 'title': 'Media Kit'}
],
'social_links': [
{'url': '#', 'action': 'Like \xe9dX on Facebook', 'name': 'facebook',
'icon-class': 'fa-facebook-square', 'title': 'Facebook'},
{'url': '#', 'action': 'Follow \xe9dX on Twitter', 'name': 'twitter',
'icon-class': 'fa-twitter', 'title': 'Twitter'},
{'url': '#', 'action': 'Subscribe to the \xe9dX YouTube channel',
'name': 'youtube', 'icon-class': 'fa-youtube', 'title': 'Youtube'},
{'url': '#', 'action': 'Follow \xe9dX on LinkedIn', 'name': 'linkedin',
'icon-class': 'fa-linkedin-square', 'title': 'LinkedIn'},
{'url': '#', 'action': 'Follow \xe9dX on Google+', 'name': 'google_plus',
'icon-class': 'fa-google-plus-square', 'title': 'Google+'},
{'url': '#', 'action': 'Subscribe to the \xe9dX subreddit',
'name': 'reddit', 'icon-class': 'fa-reddit', 'title': 'Reddit'}
],
'mobile_links': [],
'logo_image': 'https://edx.org/static/images/logo.png',
'openedx_link': {
'url': 'http://open.edx.org',
'image': 'https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png',
'title': 'Powered by Open edX'
}
}
self.assertEqual(actual_footer, expected_footer)
|
liuzz1983/open_vision
|
openvision/facenet/align/bulk_detec_face.py
|
Python
|
mit
| 9,832
| 0.00356
|
import numpy as np
# Helper routines used below (imresample, generateBoundingBox, nms, rerec,
# pad, bbreg) are assumed to be defined alongside this function in the
# original align module; they are not redefined here.
def bulk_detect_face(images, detection_window_size_ratio, model, threshold, factor):
# im: input image
# minsize: minimum of faces' size
# pnet, rnet, onet: caffemodel
# threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1]
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
# TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = model.pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = model.rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
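# Bookkeeping note (illustrative counts): bulk_rnet_input stacks every
# image's 24x24 crops, so if image 0 contributed 3 boxes and image 1
# contributed 5, score[0:3] belongs to image 0 and score[3:8] to image 1;
# the running offset i slices the shared rnet output back per image.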
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
                    if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
                        tempimg[:, :, :, k] = imresample(tmp, (48, 48))
                    else:
                        return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
        i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = model.onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
    for index, image_obj in enumerate(images_with_boxes):
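        # A sketch of the per-image third-stage slicing, assuming it mirrors
        # the RNet stage above (threshold[2] would gate the ONet scores and
        # `points` carries the facial landmarks; the names follow variables
        # already defined in this function):
        if 'onet_input' not in image_obj:
            ret.append(None)
            continue
        onet_input_count = image_obj['onet_input'].shape[0]
        score_per_image = score[i:i + onet_input_count]
        points_per_image = points[:, i:i + onet_input_count]
        ipass = np.where(score_per_image > threshold[2])
        i += onet_input_count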
Xiaofei-Zhang/NAMD_Docking_pipeline | pre_DOCKING/prepare_receptors.py | Python | mit | 1,588 | 0.015113
# prepare_receptors.py
# Create the .pdbqt files and receptors coordinates file of receptors
# for VinaMPI Docking
# Usage:
# python prepare_receptors.py
#
# Specify the correct paths of prepare_receptor4.py pythonsh VMD
# Make sure the get_AS_grid.tcl file uses the correct residue number
# of the active sites
# Run the scripts in the folder contains all receptors .pdb file
#
# Output: .pdbqt file for each .pdb
# receptors.txt: used in VinaMPI
#
# Authors: Xiaofei Zhang, Sally R. Ellingson
# Date: June 21 2016
import os, glob, sys, shlex, subprocess
mypath = os.path.realpath(__file__)
tclpath = os.path.split(mypath)[0] + os.path.sep + 'tcl' + os.path.sep
# Set the path of prepare_receptor4.py
prepReceptor='/Users/Xiaofei/Documents/2016SpringRA/mgltools_i86Darwin9_1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py'
# Set the path of pythonsh
pythonsh='/Users/Xiaofei/Documents/2016SpringRA/mgltools_i86Darwin9_1.5.6/bin/pythonsh'
# Set the path of VMD
vmd='/Volumes/VMD-1.9.2/VMD 1.9.2.app/Contents/vmd/vmd_MACOSXX86'
receptor_list=glob.glob('*.pdb')
# Create pdbqt files
for pdbfile in receptor_list:
pdbqtfile = pdbfile[:-3]+'pdbqt'
    os.system(pythonsh + ' ' + prepReceptor + ' -r ' + pdbfile + ' -o ' + pdbqtfile + ' -A hydrogens')
# Create receptors.txt file
with open('receptors.txt','w') as f:
f.write('receptor size_x size_y size_z center_x center_y center_z cpu=1\n')
for pdbfile in receptor_list:
pdbid = pdbfile[:-4]
os.system('\"'+ vmd + '\"' +' -dispdev text -eofexit < '+ tclpath + 'get_AS_grid.tcl' + ' ' + '-args' + ' '+ pdbid)
pcubillos/pytips | pytips/__init__.py | Python | mit | 491 | 0.002037
# Copyright (c) 2015-2019 Patricio Cubillos and contributors.
# pytips is open-source software under the MIT license (see LICENSE).
from .tips import __all__
from .tips import *
# Clean up top-level namespace--delete everything that isn't in __all__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __all__ ):
del locals()[varname]
del(varname)
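# A quick illustration of the cleanup idiom above, as a standalone script
# (hypothetical module contents; at module level locals() is globals(), so
# the deletion works):
#
#   __all__ = ['public_func']
#   def public_func(): return 42
#   _private_helper = object()
#   for varname in dir():
#       if not ((varname.startswith('__') and varname.endswith('__')) or
#               varname in __all__):
#           del locals()[varname]
#   del(varname)
#   # dir() now lists public_func but no longer _private_helper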
Thortoise/Super-Snake | Blender/animation_nodes-master/nodes/boolean/invert_node.py | Python | gpl-3.0 | 368 | 0.002717
import bpy
from ... base_types.node import AnimationNode
class InvertNode(bpy.types.Node, AnimationNode):
bl_idname = "an_InvertNode"
bl_label = "Invert Boolean"
def create(self):
self.newInput("Boolean", "Input", "input")
self.newOutput("Boolean", "Output", "output")
    def getExecutionCode(self):
return "output = not input"
mfussenegger/Huluobo | base.py | Python | mit | 1,242 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler, HTTPError
from schema import Session, Feed
from jinja2.exceptions import TemplateNotFound
class Base(RequestHandler):
@property
def env(self):
return self.application.env
def get_error_html(self, status_code, **kwargs):
try:
self.render('error/%s.html' % status_code)
except TemplateNotFound:
try:
self.render('error/50x.html', status_code=status_code)
except TemplateNotFound:
self.write('epic fail')
Session.close()
def on_finish(self):
Session.remove()
def render(self, template, **kwds):
try:
template = self.env.get_template(template)
except TemplateNotFound:
raise HTTPError(404)
kwds['feeds'] = Session.query(Feed).order_by(Feed.title)
self.env.globals['request'] = self.request
        self.env.globals['static_url'] = self.static_url
self.env.globals['xsrf_form_html'] = self.xsrf_form_html
self.write(template.render(kwds))
Session.close()
class NoDestinationHandler(Base):
def get(self):
raise HTTPError(404)
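# A minimal wiring sketch for the handlers above (hypothetical application
# setup; assumes a jinja2 Environment plus the Session/Feed schema imported
# at the top of this module):
#
#   import tornado.ioloop
#   import tornado.web
#   from jinja2 import Environment, FileSystemLoader
#
#   app = tornado.web.Application([(r'/missing', NoDestinationHandler)])
#   app.env = Environment(loader=FileSystemLoader('templates'))
#   app.listen(8888)
#   tornado.ioloop.IOLoop.instance().start()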
mlopes/LogBot | logbot/__init__.py | Python | mit | 147 | 0
from logbot.daemonizer import Daemonizer
from logbot.irc_client import IrcClient
from logbot.logger import Logger
from logbot.parser import Parser
agundy/Whiteboard | trackSchool/trackSchool/settings/common.py | Python | mit | 258 | 0.003876
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
Gustry/inasafe | safe/gui/tools/wizard/step_kw49_inasafe_raster_default_values.py | Python | gpl-3.0 | 7,187 | 0
# coding=utf-8
"""InaSAFE Wizard Step InaSAFE Raster Default Fields."""
# noinspection PyPackageRequirements
import logging
from parameters.qt_widgets.parameter_container import ParameterContainer
from safe import messaging as m
from safe.utilities.i18n import tr
from safe.common.parameters.default_value_parameter import (
DefaultValueParameter)
from safe.common.parameters.default_value_parameter_widget import (
DefaultValueParameterWidget)
from safe.definitions.layer_purposes import (layer_purpose_aggregation)
from safe.definitions.utilities import get_fields, get_compulsory_fields
from safe.gui.tools.wizard.utilities import get_inasafe_default_value_fields
from safe.gui.tools.wizard.wizard_step import (
WizardStep, get_wizard_step_ui_class)
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
FORM_CLASS = get_wizard_step_ui_class(__file__)
LOGGER = logging.getLogger('InaSAFE')
class StepKwInaSAFERasterDefaultValues(WizardStep, FORM_CLASS):
"""InaSAFE Wizard Step InaSAFE Raster Default Fields."""
def __init__(self, parent=None):
"""Constructor for the tab.
:param parent: parent - widget to use as parent (Wizard Dialog).
:type parent: QWidget
"""
WizardStep.__init__(self, parent)
self.extra_parameters = [
(DefaultValueParameter, DefaultValueParameterWidget)
]
self.parameters = []
self.parameter_container = ParameterContainer(
extra_parameters=self.extra_parameters)
self.default_values_grid.addWidget(self.parameter_container)
def is_ready_to_next_step(self):
"""Check if the step is complete.
If so, there is no reason to block the Next button.
:returns: True if new step may be enabled.
:rtype: bool
"""
return True
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
new_step = self.parent.step_kw_source
return new_step
def inasafe_fields_for_the_layer(self):
"""Return a list of inasafe fields the current layer.
:returns: A list where each value represents inasafe field.
:rtype: list
"""
# Get hazard or exposure value
layer_purpose_key = self.parent.step_kw_purpose.selected_purpose()[
'key']
if layer_purpose_key != layer_purpose_aggregation['key']:
subcategory_key = self.parent.step_kw_subcategory.\
selected_subcategory()['key']
else:
subcategory_key = None
# Get all fields with replace_null = True
inasafe_fields = get_fields(
layer_purpose_key,
subcategory_key,
replace_null=True,
in_group=False)
# remove compulsory field since it has been set in previous step
try:
inasafe_fields.remove(get_compulsory_fields(
layer_purpose_key, subcategory_key))
except ValueError:
pass
return inasafe_fields
# noinspection PyTypeChecker
def set_widgets(self):
"""Set widgets on the Extra Keywords tab."""
existing_inasafe_default_values = self.parent.get_existing_keyword(
'inasafe_default_values')
# Remove old container and parameter
        if self.parameter_container:
self.default_values_grid.removeWidget(
self.parameter_container)
if self.parameters:
self.parameters = []
# Iterate through all inasafe fields
# existing_inasafe_default_values
for inasafe_field in self.inasafe_fields_for_the_layer():
# Create DefaultSelectParameter
parameter = DefaultValueParameter()
parameter.guid = inasafe_field['key']
parameter.name = inasafe_field['name']
parameter.is_required = False
parameter.help_text = inasafe_field['default_value']['description']
# parameter.description = inasafe_field['default_value']
parameter.element_type = unicode
parameter.labels = get_inasafe_default_value_fields(
self.parent.setting, inasafe_field['key'])[0]
parameter.options = get_inasafe_default_value_fields(
self.parent.setting, inasafe_field['key'])[1]
if existing_inasafe_default_values:
existing_default_value = existing_inasafe_default_values.get(
inasafe_field['key'])
if existing_default_value:
parameter.default = existing_default_value
self.parameters.append(parameter)
# Create the parameter container and add to the wizard.
self.parameter_container = ParameterContainer(
self.parameters, extra_parameters=self.extra_parameters)
self.parameter_container.setup_ui()
self.default_values_grid.addWidget(self.parameter_container)
# Set default value to None
for parameter_widget in self.parameter_container.\
get_parameter_widgets():
parameter_widget.widget().set_value(None)
# Set default value from existing keywords
if existing_inasafe_default_values:
for guid, default in existing_inasafe_default_values.items():
parameter_widget = self.parameter_container.\
get_parameter_widget_by_guid(guid)
if isinstance(parameter_widget, DefaultValueParameterWidget):
parameter_widget.set_value(default)
def get_inasafe_default_values(self):
"""Return inasafe default from the current wizard state.
:returns: Dictionary of key and value from InaSAFE Default Values.
:rtype: dict
"""
inasafe_default_values = {}
parameters = self.parameter_container.get_parameters(True)
for parameter in parameters:
if parameter.value is not None:
inasafe_default_values[parameter.guid] = parameter.value
return inasafe_default_values
def clear(self):
"""Clear current state."""
# Adapted from http://stackoverflow.com/a/13103617/1198772
for i in reversed(range(self.default_values_grid.count())):
self.default_values_grid.itemAt(i).widget().setParent(None)
self.parameters = []
self.parameter_container = ParameterContainer()
def help_content(self):
"""Return the content of help for this step wizard.
We only needs to re-implement this method in each wizard step.
:returns: A message object contains help.
:rtype: m.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'In this wizard step: {step_name}, you will be able to '
            'set a value that corresponds with an InaSAFE field '
'concept as default value.').format(step_name=self.step_name)))
return message
seishei/multiprocess | py2.6/multiprocess/forking.py | Python | bsd-3-clause | 14,448 | 0.001453
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
from multiprocess import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)
#
# Try making some callable types picklable
#
try:
from dill import Pickler
except ImportError:
from pickle import Pickler
class ForkingPickler(Pickler):
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
#def _reduce_builtin_function_or_method(m):
# return getattr, (m.__self__, m.__name__)
#ForkingPickler.register(type(list().append), _reduce_builtin_function_or_method)
#ForkingPickler.register(type(int().__add__), _reduce_builtin_function_or_method)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
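# The same registration pattern, sketched for a user-defined type: a reducer
# must return a (callable, args) pair, just like the method reducers above
# (the _Point class here is purely illustrative, not part of the module).
class _Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):
    # rebuild by calling the class with the stored coordinates
    return _Point, (p.x, p.y)

ForkingPickler.register(_Point, _reduce_point)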
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
    close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
pid, sts = os.waitpid(self.pid, flag)
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError, e:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
return False
#
# Windows
#
else:
import thread
import msvcrt
import _subprocess
import time
from _multiprocess import win32, Connection, PipeConnection
from .util import Finalize
try:
# from cPickle import dump, load, HIGHEST_PROTOCOL
from dill import load, DEFAULT_PROTOCOL as HIGHEST_PROTOCOL
except ImportError:
from pickle import load, HIGHEST_PROTOCOL
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
exit = win32.ExitProcess
close = win32.CloseHandle
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.executable.lower().endswith('pythonservice.exe'):
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = thread._local()
def __init__(self, process_obj):
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
ht.Close()
close(rhandle)
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
                except WindowsError:
                    # as in the Unix branch above: ignore the error if the
                    # process has already exited
                    if self.wait(timeout=0.1) is None:
                        raise
calendall/calendall | calendall/profiles/migrations/0004_auto_20150117_1017.py | Python | bsd-3-clause | 703 | 0.001422
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0003_auto_20150115_1939'),
]
operations = [
migrations.AddField(
model_name='calendalluser',
name='location',
field=models.CharField(blank=True, max_length=30, verbose_name='User location'),
preserve_default=True,
),
migrations.AddField(
model_name='calendalluser',
name='url',
field=models.URLField(blank=True, verbose_name='User homepage'),
preserve_default=True,
),
]
zeekay/flask-uwsgi-websocket | flask_uwsgi_websocket/__init__.py | Python | mit | 1,032 | 0.012597
'''
Flask-uWSGI-WebSocket
---------------------
High-performance WebSockets for your Flask apps powered by `uWSGI <http://uwsgi-docs.readthedocs.org/en/latest/>`_.
'''
__docformat__ = 'restructuredtext'
__version__ = '0.6.1'
__license__ = 'MIT'
__author__ = 'Zach Kelling'
import sys
from ._async import *
from ._uwsgi import uwsgi
from .websocket import *
class GeventNotInstalled(Exception):
pass
try:
from ._gevent import *
except ImportError:
class GeventWebSocket(object):
def __init__(self, *args, **kwargs):
raise GeventNotInstalled("Gevent must be installed to use GeventWebSocket. Try: `pip install gevent`.")
class AsyncioNotAvailable(Exception):
pass
try:
assert sys.version_info > (3,4)
from ._asyncio import *
except (AssertionError, ImportError):
class AsyncioWebSocket(object):
def __init__(self, *args, **kwargs):
raise AsyncioNotAvailable("Asyncio should be enabled at uwsgi compile time. Try: `UWSGI_PROFILE=asyncio pip install uwsgi`.")
walkinreeds/MultiProxies | lib/EchoColor.py | Python | gpl-3.0 | 1,788 | 0.002237
# coding=utf-8
__author__ = 'DM_'
import platform
import ctypes
import sys
USE_WINDOWS_COLOR = False
if platform.system() == "Windows":
USE_WINDOWS_COLOR = True
# #########################################
#windows color.
BLACK = 0x0
BLUE = 0x01
GREEN = 0x02
CYAN = 0x03
RED = 0x04
PURPLE = 0x05
YELLOW = 0x06
WHITE = 0x07
GREY = 0x08
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
def set_cmd_text_color(color, handle=std_out_handle):
'''set color'''
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def resetColor():
set_cmd_text_color(RED | GREEN | BLUE)
else:
# #########################################
#linux color.
BLACK = '\033[0m'
BLUE = '\033[34m'
GREEN = '\033[32m'
CYAN = '\033[36m'
RED = '\033[31m'
PURPLE = '\033[35m'
YELLOW = '\033[33m'
WHITE = '\033[37m'
GREY = '\033[38m'
class _echocolor():
def echo(self, mess, color=None, append=False, verbose=False):
reset = False
from lib.ProxiesFunctions import isClientVerbose
from lib.ProxiesFunctions import isColor
if USE_WINDOWS_COLOR:
if color and isColor():
set_cmd_text_color(color | color | color)
reset = True
else:
if color and isColor():
mess = color + mess + BLACK
if isClientVerbose() or verbose:
if append:
sys.stdout.write(mess)
sys.stdout.flush()
else:
print(mess)
if reset:
resetColor()
color = _echocolor()
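# A minimal usage sketch (assumes the package layout this module expects,
# i.e. that lib.ProxiesFunctions provides isColor()/isClientVerbose()):
#
#   from lib.EchoColor import color, RED, GREEN
#   color.echo('proxy alive', color=GREEN, verbose=True)
#   color.echo('proxy dead', color=RED, verbose=True)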
Lekensteyn/buildbot | master/buildbot/www/auth.py | Python | gpl-2.0 | 6,728 | 0.000297
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import re
from twisted.cred.checkers import FilePasswordDB
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm
from twisted.cred.portal import Portal
from twisted.internet import defer
from twisted.web.error import Error
from twisted.web.guard import BasicCredentialFactory
from twisted.web.guard import DigestCredentialFactory
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import IResource
from zope.interface import implementer
from buildbot.util import bytes2NativeString
from buildbot.util import config
from buildbot.www import resource
class AuthRootResource(resource.Resource):
def getChild(self, path, request):
# return dynamically generated resources
if path == b'login':
return self.master.www.auth.getLoginResource()
elif path == b'logout':
return self.master.www.auth.getLogoutResource()
return resource.Resource.getChild(self, path, request)
class AuthBase(config.ConfiguredMixin):
def __init__(self, userInfoProvider=None):
if userInfoProvider is None:
userInfoProvider = UserInfoProviderBase()
self.userInfoProvider = userInfoProvider
def reconfigAuth(self, master, new_config):
self.master = master
def maybeAutoLogin(self, request):
return defer.succeed(None)
def getLoginResource(self):
raise Error(501, "not implemented")
def getLogoutResource(self):
return LogoutResource(self.master)
@defer.inlineCallbacks
def updateUserInfo(self, request):
session = request.getSession()
if self.userInfoProvider is not None:
infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])
session.user_info.update(infos)
session.updateSession(request)
def getConfigDict(self):
return {'name': type(self).__name__}
class UserInfoProviderBase(config.ConfiguredMixin):
name = "noinfo"
def getUserInfo(self, username):
return defer.succeed({'email': username})
class LoginResource(resource.Resource):
def render_GET(self, request):
return self.asyncRenderHelper(request, self.renderLogin)
@defer.inlineCallbacks
def renderLogin(self, request):
raise NotImplementedError
class NoAuth(AuthBase):
pass
class RemoteUserAuth(AuthBase):
header = "REMOTE_USER"
headerRegex = re.compile(r"(?P<username>[^ @]+)@(?P<realm>[^ @]+)")
def __init__(self, header=None, headerRegex=None, **kwargs):
AuthBase.__init__(self, **kwargs)
if header is not None:
self.header = header
if headerRegex is not None:
self.headerRegex = re.compile(headerRegex)
@defer.inlineCallbacks
def maybeAutoLogin(self, request):
header = request.getHeader(self.header)
if header is None:
raise Error(403, "missing http header %s. Check your reverse proxy config!" % (
self.header))
        res = self.headerRegex.match(header)
if res is None:
raise Error(
403, 'http header does not match regex! "%s" not matching %s' %
(header, self.headerRegex.pattern))
session = request.getSession()
if session.user_info != dict(res.groupdict()):
session.user_info = dict(res.groupdict())
            yield self.updateUserInfo(request)
@implementer(IRealm)
class AuthRealm(object):
def __init__(self, master, auth):
self.auth = auth
self.master = master
def requestAvatar(self, avatarId, mind, *interfaces):
if IResource in interfaces:
return (IResource,
PreAuthenticatedLoginResource(self.master, avatarId),
lambda: None)
raise NotImplementedError()
class TwistedICredAuthBase(AuthBase):
def __init__(self, credentialFactories, checkers, **kwargs):
AuthBase.__init__(self, **kwargs)
self.credentialFactories = credentialFactories
self.checkers = checkers
def getLoginResource(self):
return HTTPAuthSessionWrapper(
Portal(AuthRealm(self.master, self), self.checkers),
self.credentialFactories)
class HTPasswdAuth(TwistedICredAuthBase):
def __init__(self, passwdFile, **kwargs):
TwistedICredAuthBase.__init__(
self,
[DigestCredentialFactory(b"md5", b"buildbot"),
BasicCredentialFactory(b"buildbot")],
[FilePasswordDB(passwdFile)],
**kwargs)
class UserPasswordAuth(TwistedICredAuthBase):
def __init__(self, users, **kwargs):
TwistedICredAuthBase.__init__(
self,
[DigestCredentialFactory(b"md5", b"buildbot"),
BasicCredentialFactory(b"buildbot")],
[InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],
**kwargs)
def _redirect(master, request):
url = request.args.get("redirect", ["/"])[0]
return resource.Redirect(master.config.buildbotURL + "#" + url)
class PreAuthenticatedLoginResource(LoginResource):
# a LoginResource which is already authenticated via a
# HTTPAuthSessionWrapper
def __init__(self, master, username):
LoginResource.__init__(self, master)
self.username = username
@defer.inlineCallbacks
def renderLogin(self, request):
session = request.getSession()
session.user_info = dict(username=bytes2NativeString(self.username))
yield self.master.www.auth.updateUserInfo(request)
raise _redirect(self.master, request)
class LogoutResource(resource.Resource):
def render_GET(self, request):
session = request.getSession()
session.expire()
session.updateSession(request)
request.redirect(_redirect(self.master, request).url)
return b''
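# A quick standalone check of the default RemoteUserAuth.headerRegex above:
#
#   >>> import re
#   >>> regex = re.compile(r"(?P<username>[^ @]+)@(?P<realm>[^ @]+)")
#   >>> regex.match("alice@example.org").groupdict()
#   {'username': 'alice', 'realm': 'example.org'}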
fenceFoil/canopto | text.py | Python | bsd-3-clause | 3,088 | 0.038212
#!/bin/python3
from Canopto import Canopto
import pygame
from pygame import *
from pygame.locals import *
import time
from colorsys import *
import sys
import json
import datetime
# scroll text across canopto. blocks. fg & bg are colors
def scrollText (canopto, text, fg = (0xFF, 0x33, 0xFF), bg = (0x00, 0x00, 0x00, 0x00), font = "16bfZX", yOffset = 2):
#TinyUnicode 16, offset 5
#Habbo 12, offset 0
#16bfZX 16, offset 2
#surface = pygame.font.SysFont("16bfZX", 16).render(text, False, fg, bg)
surface = pygame.font.Font(font+".ttf", 16).render(text, False, fg, bg)
scrollSurface(canopto, surface, speed = 1.2, yOffset = 2)
# speed is a multiplier of the default speed which is pretty good. Blocks until
# surface has finished scrolling
def scrollSurface (canopto, surface, speed = 1, yOffset = 0):
for x in range (0, surface.get_width()-(canopto.width-1)):
frame = Surface ((canopto.width, canopto.height))
frame.blit (surface, (-x, -yOffset))
canopto.drawSurface(frame)
canopto.update()
for event in pygame.event.get():
if event.type==QUIT:
sys.exit(0)
time.sleep((1/speed) * 0.07)
# Main
if __name__ == "__main__":
c = Canopto(8, 8, previewEnabled=True, useGamma=True)
#scrollText (c, "HI GEESE yeah you're pretty cool xDDDD :) Hi!");
scrollText (c, " trying small letters ");
# if __name__ == "__main__":
# display = Canopto (8, 8, previewEnabled=True, useGamma=True)
# CONFIG_PATH = 'config.json'
# # API KEYS FOUND HERE: https://www.twilio.com/user/account (NOT under DEV TOOLS > API KEYS)
# # Read API keys for Twilio from json config file (outside of git repository)
# # Or use environment variables as https://github.com/twilio/twilio-python suggests
# with open(CONFIG_PATH) as json_config:
# config = json.load(json_config)
# ACCOUNT_SID = config['twilio']['account_sid']
# AUTH_TOKEN = config['twilio']['auth_token']
# print("Successfuly read api information from config")
# client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
# processedMessages = []
# processedIDs = []
# startTime = datetime.datetime.now() + datetime.timedelta(hours=4)
# while (True): #display.tracker.running):
# #if tracker.frameCounter == 10:
# #tracker.resetToMotion = True
# #print "reset to motion"
# print ("hihi")
# #Because of a conflict between timezones used to represent dates cannot limit by day since messages sent after
# #~10pm will, according to server, be sent tomorrow thereby not having them show up as new messages if limited by today
# #date_sent=datetime.datetime.today()
# messages = client.messages.list()
# print ("hi")
# for message in messages:
# if (message.direction == "inbound"):
# #New message from now onward that hasn't already been processed
# if message.sid not in processedIDs and message.date_created > startTime:
# scrollText(display, " " + message.body + " ")
# processedIDs.append(message.sid)
# time.sleep(1)
# #Close down the main loops of the threads
# #tracker.running = False
# display.clear()
# display.running = False
freedesktop-unofficial-mirror/gstreamer-sdk__cerbero | cerbero/bootstrap/__init__.py | Python | lgpl-2.1 | 1,054 | 0
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
class BootstraperBase (object):
    def __init__(self, config):
        self.config = config
def start(self):
        raise NotImplementedError("'start' must be implemented by subclasses")
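# A minimal subclass sketch (hypothetical bootstrapper; it assumes nothing
# beyond the interface defined above):
class EchoBootstraper(BootstraperBase):
    def start(self):
        print('bootstrapping with config %r' % (self.config,))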
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2015_12_01/_feature_client.py | Python | mit | 4,057 | 0.003451
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import FeatureClientConfiguration
from .operations import FeatureClientOperationsMixin, FeaturesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class FeatureClient(FeatureClientOperationsMixin):
"""Azure Feature Exposure Control (AFEC) provides a mechanism for the resource providers to control feature exposure to users. Resource providers typically use this mechanism to provide public/private preview for new features prior to making them generally available. Users need to explicitly register for AFEC features to get access to such functionality.
:ivar features: FeaturesOperations operations
:vartype features: azure.mgmt.resource.features.v2015_12_01.operations.FeaturesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = FeatureClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> FeatureClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
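# A minimal usage sketch (hypothetical credentials and subscription id;
# DefaultAzureCredential lives in the separate azure-identity package, and
# features.list_all() is assumed from the FeaturesOperations surface):
#
#   from azure.identity import DefaultAzureCredential
#   client = FeatureClient(DefaultAzureCredential(), "<subscription-id>")
#   with client:
#       for feature in client.features.list_all():
#           print(feature.name)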
Just-D/chromium-1 | tools/perf/PRESUBMIT.py | Python | bsd-3-clause | 4,403 | 0.010902
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/perf/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
import re
import sys
def _CommonChecks(input_api, output_api):
"""Performs common checks, which includes running pylint."""
results = []
old_sys_path = sys.path
try:
# Modules in tools/perf depend on telemetry.
sys.path = [os.path.join(os.pardir, 'telemetry')] + sys.path
results.extend(input_api.canned_checks.RunPylint(
input_api, output_api, black_list=[], pylintrc='pylintrc'))
results.extend(_CheckJson(input_api, output_api))
results.extend(_CheckWprShaFiles(input_api, output_api))
finally:
sys.path = old_sys_path
return results
def _CheckWprShaFiles(input_api, output_api):
"""Check whether the wpr sha files have matching URLs."""
from catapult_base import cloud_storage
results = []
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if not filename.endswith('wpr.sha1'):
continue
expected_hash = cloud_storage.ReadHash(filename)
is_wpr_file_uploaded = any(
cloud_storage.Exists(bucket, expected_hash)
for bucket in cloud_storage.BUCKET_ALIASES.itervalues())
if not is_wpr_file_uploaded:
wpr_filename = filename[:-5]
results.append(output_api.PresubmitError(
'The file matching %s is not in Cloud Storage yet.\n'
'You can upload your new WPR archive file with the command:\n'
'depot_tools/upload_to_google_storage.py --bucket '
'<Your pageset\'s bucket> %s.\nFor more info: see '
'http://www.chromium.org/developers/telemetry/'
'record_a_page_set#TOC-Upload-the-recording-to-Cloud-Storage' %
(filename, wpr_filename)))
return results
def _CheckJson(input_api, output_api):
"""Checks whether JSON files in this change can be parsed."""
for affected_file in input_api.AffectedFiles(include_deletes=False):
filename = affected_file.AbsoluteLocalPath()
if os.path.splitext(filename)[1] != '.json':
continue
try:
input_api.json.load(open(filename))
except ValueError:
return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]
return []
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def _IsBenchmarksModified(change):
"""Checks whether CL contains any modification to Telemetry benchmarks."""
for affected_file in change.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, _ = os.path.splitext(affected_file_path)
if (os.path.join('tools', 'perf', 'benchmarks') in file_path or
os.path.join('tools', 'perf', 'measurements') in file_path):
return True
return False
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds extra try bots list to the CL description in order to run
Telemetry benchmarks on Perf trybots in addtion to CQ trybots if the CL
contains any changes to Telemetry benchmarks.
"""
benchmarks_modified = _IsBenchmarksModified(change)
rietveld_obj = cl.RpcServer()
issue = cl.issue
original_description = rietveld_obj.get_description(issue)
if not benchmarks_modified or re.search(
      r'^CQ_EXTRA_TRYBOTS=.*', original_description, re.M | re.I):
return []
results = []
bots = [
'linux_perf_bisect',
'mac_perf_bisect',
'win_perf_bisect',
'android_nexus5_perf_bisect'
]
bots = ['tryserver.chromium.perf:%s' % s for s in bots]
bots_string = ';'.join(bots)
description = original_description
description += '\nCQ_EXTRA_TRYBOTS=%s' % bots_string
results.append(output_api.PresubmitNotifyResult(
'Automatically added Perf trybots to run Telemetry benchmarks on CQ.'))
if description != original_description:
rietveld_obj.update_description(issue, description)
return results
quintel/etmoses | scripts/power_to_heat/p2h_profile_generator.py | Python | mit | 7,575 | 0.011485
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 11:09:48 2015
@author: joris.berkhout@quintel.com & chael.kruip@quintel.com
"""
#==============================================================================
# This script can be used to generate typical domestic hot water (DHW) profiles
# for a period of one year and time steps of 15 minutes. The script is based in
# great part on Realistic Domestic Hot-Water Profiles in Different Time Scales,
# Jordan (2001). This study assumes a daily average DHW use of 200 liters and
# distinguishes four types of DHW consumption, each with an associated volume
# and average daily occurrence:
#
# type A: short load (1 liter per event, 28 occurrences per day)
# type B: medium load (6 liters per event, 12 occurrences per day)
# type C: bath (140 liters per event, 0.143 occurrences per day (once a week))
# type D: shower (40 liters per event, 2 occurrences per day)
#
# According to Jordan (2001), the duration of each of these types is shorter
# than 15 minutes (i.e. the time resolution of our simulation). Hence we
# decided to only model the probability that an event occurs within each 15
# minute time step and assign the entire volume of that event to that 15 minute
# bin. The probability of each type of event varies throughout the year (i.e.
# slightly more DHW consumption in winter), throughout the week (more in the
# weekend) and throughout the day (no DHW consumption during the night).
#
# The script returns two types of profiles:
#- use profiles: randomly generated profiles with a time resolution of 15
# minutes. To match the needs of ETMoses these exported profiles are scaled
# to the maximal storage volume of the boiler used in ETM for P2H
#- availability profiles: the profiles indicate how full the boiler has to be
# in order to meet future demands. The profiles are derived from the use
# profile and are also expressed as a fraction of the maximal storage volume
#==============================================================================
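# Sanity check of the volumes and occurrences quoted above: they reproduce
# the assumed 200 liters per day within rounding,
#   28*1 + 12*6 + 0.143*140 + 2*40 = 28 + 72 + 20.02 + 80 = 200.02 liters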
import numpy as np
import pylab as plt
plt.close()
# Global variables
# volumes per type of event
volume_A = 1
volume_B = 6
volume_C = 140
volume_D = 40
# daily occurence per type of event
occ_A = 28
occ_B = 12
occ_C = 0.143
occ_D = 2
# The annual energy demand for DHW in The Netherlands in 2012
annual_energy_demand_DHW = 80e15 # Joule
# These numbers come from the ETM
number_of_inhabitants = 16730348
inhabitants_per_household = 2.2
daily_energy_demand_DHW_per_household = annual_energy_demand_DHW / 365 / number_of_inhabitants * inhabitants_per_household
# From Jordan (2001)
daily_DHW_volume = 200 # liters
# conversion from liters to energy
DHW_liters_to_energy = daily_energy_demand_DHW_per_household / daily_DHW_volume
# From the ETM, see https://github.com/quintel/etdataset/issues/599#issuecomment-98747401
specific_heat_H20 = 4.186 # Joule / gram / degrees
P2H_boiler_volume = 100 # liters
temp_diff = 95 - 15 # degrees
# HP capacity
heat_output_capacity = 2000 #W
# The factor 1000 is to convert from liters to grams
P2H_storage_volume = specific_heat_H20 * P2H_boiler_volume * 1000 * temp_diff # Joule
def gauss_function(x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
quarters = np.arange(0,35040.,1.)
# The probability per year follows a cosine function with an amplitude of 10%.
# This probability function is the same for a types of events
prob_year_ABCD = 0.5 + 0.05 * np.cos((quarters/len(quarters) - 45./365 )*2*np.pi)
# All types of events have an increasing probability of happening in the weekend
# Type C (bath) follows its own probability
# As 2013 started on a Tuesday, I shifted the values shown in Figure 1.5 of
# Jordan (2001)
prob_week_ABD = np.array([0.95, 0.95, 0.95, 0.98, 1.09, 1.13, 0.95])
prob_week_C = np.array([0.50, 0.50, 0.50, 0.80, 1.90, 2.30, 0.50])
# Each type of event follows its own probablity function during the week. I have
# recreated the probability functions shown in Figure 1.6 of Jordan (2001) below.
# Type A and B
prob_day_AB = np.zeros(96)
for i in range(5*4, 23*4):
prob_day_AB[i] = 1/18.
# Type C
prob_day_C = np.zeros(96)
for j in range(7*4, 23*4):
prob_day_C[j] = gauss_function(j, 0.06, 15*4., 20.)
for k in range(17*4, 21*4):
    prob_day_C[k] = gauss_function(k, 0.22, 19*4., 5)
# Type D
prob_day_D = np.zeros(96)
for k in range(5*4, 9*4):
prob_day_D[k] = gauss_function(k, 0.25, 7*4., 4.)
for k in range(9*4, 18*4):
prob_day_D[k] = 0.02
for k in range(18*4, 21*4):
prob_day_D[k] = gauss_function(k, 0.085, 19.5*4., 4.)
for k in range(21*4, 23*4):
prob_day_D[k] = 0.02
# The probability for an event to happen is prob_year * prob_week * prob_day
# The following function can be used to construct the probability function for
# an entire year with time steps of 15 minutes
def annual_probability_curve(prob_day, prob_week, prob_year):
annual_probability = np.zeros(len(prob_year))
for i in range(0, len(prob_year)):
day_of_week = ( i / 96 ) % 7
hour_of_day = i % 96
annual_probability[i] = prob_year[i] * prob_week[day_of_week] * prob_day[hour_of_day]
# return the normalized probability function
return annual_probability / sum(annual_probability)
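# Index arithmetic check for the mapping above: with 96 quarter-hours per
# day, i = 96 yields day_of_week = (96 / 96) % 7 = 1 (the second day of the
# year) and hour_of_day = 96 % 96 = 0 (its first quarter-hour).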
# Create the probabilities
prob_year_A = annual_probability_curve(prob_day_AB, prob_week_ABD, prob_year_ABCD)
prob_year_B = annual_probability_curve(prob_day_AB, prob_week_ABD, prob_year_ABCD)
prob_year_C = annual_probability_curve(prob_day_C, prob_week_C, prob_year_ABCD)
prob_year_D = annual_probability_curve(prob_day_D, prob_week_ABD, prob_year_ABCD)
# main loop
for j in range(0,10):
pattern_A = np.zeros(len(prob_year_A))
pattern_B = np.zeros(len(prob_year_B))
pattern_C = np.zeros(len(prob_year_C))
pattern_D = np.zeros(len(prob_year_D))
np.random.seed(j)
for i in range(0, len(pattern_A)):
# construct the random pattern for each type of event by taking onto account
# their probability, the number of events per day and the volume per event
pattern_A[i] = volume_A * np.random.choice((0,1),p=[1-prob_year_A[i]*occ_A*365, prob_year_A[i]*occ_A*365])
pattern_B[i] = volume_B * np.random.choice((0,1),p=[1-prob_year_B[i]*occ_B*365, prob_year_B[i]*occ_B*365])
pattern_C[i] = volume_C * np.random.choice((0,1),p=[1-prob_year_C[i]*occ_C*365, prob_year_C[i]*occ_C*365])
pattern_D[i] = volume_D * np.random.choice((0,1),p=[1-prob_year_D[i]*occ_D*365, prob_year_D[i]*occ_D*365])
# add all patterns to obtain a pattern in liters
pattern = pattern_A + pattern_B + pattern_C + pattern_D
# calculate pattern in energy terms
pattern = pattern * DHW_liters_to_energy
# calculate the pattern in relative terms by dividing by the maximum storage volume of the P2H boiler
pattern = pattern / P2H_storage_volume
# If the tank should be more than 100% full, we expect the gas-boiler to kick in and save the day
pattern = [x if x < 1 else 1 for x in pattern]
use_filename = '../output_data/p2h_use_profile_' + str(j+1) + '.csv'
plt.savetxt(use_filename, pattern, fmt='%.3f', delimiter=',')
mini = 0
maxi = 8760 * 4
x = np.arange(mini,maxi)
plt.step(x,pattern[mini:maxi],linewidth=3.0)
plt.xlabel('time (15 minutes steps)')
plt.ylabel('daily energy use for DHW consumption (fraction of tank)')
plt.title('pattern ' + str(j+1))
plt.show()
lalitkumarj/NEXT-psych | next/database/database_backup.py | Python | apache-2.0 | 2,053 | 0.026303
#!/usr/bin/python
"""
Backs up the database to S3 every 30 minutes. To recover the database (i.e. reverse the process),
simply download the file from S3, un-tar it, and use the command:
(./)mongorestore --host {hostname} --port {port} path/to/dump/mongodump
where {hostname} and {port} are as they are below
"""
import sys
sys.path.append("/next_backend")
import time
import traceback
import next.utils as utils
import subprocess
import next.constants as constants
import os
NEXT_BACKEND_GLOBAL_HOST = os.environ.get('NEXT_BACKEND_GLOBAL_HOST', 'localhost')
AWS_BUCKET_NAME = os.environ.get('AWS_BUCKET_NAME','next-database-backups')
timestamp = utils.datetimeNow()
print "[ %s ] starting backup of MongoDB to S3..." % str(timestamp)
print "[ %s ] constants.AWS_ACCESS_ID = %s" % (str(timestamp),constants.AWS_ACCESS_ID)
subprocess.call('/usr/bin/mongodump -vvvvv --host {hostname}:{port} --out /dump/mongo_dump'.format( hostname=constants.MONGODB_HOST, port=constants.MONGODB_PORT ),shell=True)
try:
tar_file = sys.argv[1]
except:
tar_file = 'mongo_dump_{hostname}_{timestamp}.tar.gz'.format( hostname=NEXT_BACKEND_GLOBAL_HOST, timestamp= timestamp.strftime("%Y-%m-%d_%H:%M:%S") )
subprocess.call('tar czf {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import boto
# boto.set_stream_logger('boto')
try:
conn = S3Connection(constants.AWS_ACCESS_ID,constants.AWS_SECRET_ACCESS_KEY)
b = conn.get_bucket(AWS_BUCKET_NAME)
k = Key(b)
k.key = tar_file
bytes_saved = k.set_contents_from_filename( '/dump/'+tar_file )
timestamp = utils.datetimeNow()
print "[ %s ] done with backup of MongoDB to S3... %d bytes saved" % (str(timestamp),bytes_saved)
except:
error = traceback.format_exc()
timestamp = utils.datetimeNow()
print "[ %s ] FAILED TO CONNECT TO S3... saving locally" % str(timestamp)
print error
subprocess.call('rm {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)
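# Recovery sketch, reversing the backup above (same boto/constants
# assumptions; tar_file would be the backup's key name in the bucket):
#
#   k = Key(conn.get_bucket(AWS_BUCKET_NAME))
#   k.key = tar_file
#   k.get_contents_to_filename('/dump/' + tar_file)
#   subprocess.call('tar xzf /dump/%s -C /' % tar_file, shell=True)
#   subprocess.call('mongorestore --host %s --port %s /dump/mongo_dump'
#                   % (constants.MONGODB_HOST, constants.MONGODB_PORT), shell=True)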
TribeMedia/synapse | tests/api/test_filtering.py | Python | apache-2.0 | 15,549 | 0.000064
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from tests.utils import (
MockHttpResource, DeferredMockCallable, setup_test_homeserver
)
from synapse.api.filtering import Filter
from synapse.events import FrozenEvent
user_localpart = "test_user"
def MockEvent(**kwargs):
if "event_id" not in kwargs:
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
    return FrozenEvent(kwargs)
class FilteringTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.mock_federation_resource = MockHttpResource()
self.mock_http_client = Mock(spec=[])
self.mock_http_client.put_json = DeferredMockCallable()
hs = yield setup_test_homeserver(
handlers=None,
http_client=self.mock_http_client,
keyring=Mock(),
)
self.filtering = hs.get_filtering()
self.datastore = hs.get_datastore()
def test_definition_types_works_with_literals(self):
definition = {
"types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_types_works_with_wildcards(self):
definition = {
"types": ["m.*", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_types_works_with_unknowns(self):
definition = {
"types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="now.for.something.completely.different",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_literals(self):
definition = {
"not_types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_wildcards(self):
definition = {
"not_types": ["m.room.message", "org.matrix.*"]
}
event = MockEvent(
sender="@foo:bar",
type="org.matrix.custom.event",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_unknowns(self):
definition = {
"not_types": ["m.*", "org.*"]
}
event = MockEvent(
sender="@foo:bar",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_types_takes_priority_over_types(self):
definition = {
"not_types": ["m.*", "org.*"],
"types": ["m.room.message", "m.room.topic"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.topic",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_senders_works_with_literals(self):
definition = {
"senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@flibble:wibble",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_senders_works_with_unknowns(self):
definition = {
"senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_literals(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@flibble:wibble",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_unknowns(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_senders_takes_priority_over_senders(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets", "@misspiggy:muppets"]
}
event = MockEvent(
sender="@misspiggy:muppets",
type="m.room.topic",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_literals(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_unknowns(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_literals(self):
definition = {
"not_rooms": ["!anothersecretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_unknowns(self):
definition = {
"not_rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_rooms_takes_priority_over_rooms(self):
definition = {
"not_rooms": ["!secretbase:unknown"],
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"
gizela/gizela | example/matplotlib/axis.py | Python | gpl-3.0 | 782 | 0.021739
# set S-JTSK axes orientation
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
ax=plt.gca()
#ax.set_ylim(ax.get_ylim()[::-1])
# direction of axes
ax.invert_xaxis()
ax.invert_yaxis()
# ticks position
for tick in ax.xaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
for tick in ax.yaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
plt.plot([1,2,3,1],[3,1,2,3])
# ticks string formatter
import matplotlib.ticker as ticker
formatter = ticker.FormatStrFormatter('%.2f m')
ax.xaxis.set_major_formatter(formatter)
# ticks func formatter
def format(x, pos):
return "%s - %s" % (x,pos)
formatter = ticker.FuncFormatter(format)
ax.xaxis.set_major_formatter(formatter)
plt.show()
da1z/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_referrers.py | Python | apache-2.0 | 8,753 | 0.005141
import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
import traceback
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
name = xml_node.getAttribute('name')
value = xml_node.getAttribute('value')
val_type = xml_node.getAttribute('type')
found_as = xml_node.getAttribute('found_as')
stream.write('Name: ')
stream.write(unquote_plus(name))
    stream.write(', Value: ')
stream.write(unquote_plus(value))
stream.write(', Type: ')
stream.write(unquote_plus(val_type))
if found_as:
stream.write(', Found as: %s' % (unquote_plus(found_as),))
stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
if stream is None:
stream = sys.stdout
result = get_referrer_info(obj)
from xml.dom.minidom import parseString
dom = parseString(result)
xml = dom.getElementsByTagName('xml')[0]
for node in xml.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
if node.localName == 'for':
stream.write('Searching references for: ')
for child in node.childNodes:
if child.nodeType == node.TEXT_NODE:
continue
print_var_node(child, stream)
elif node.localName == 'var':
stream.write('Referrer found: ')
print_var_node(node, stream)
else:
sys.stderr.write('Unhandled node: %s\n' % (node,))
return result
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
DEBUG = 0
if DEBUG:
sys.stderr.write('Getting referrers info.\n')
try:
try:
if searched_obj is None:
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Skipping getting referrers for None',
additionalInXml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
obj_id = id(searched_obj)
try:
if DEBUG:
sys.stderr.write('Getting referrers...\n')
import gc
referrers = gc.get_referrers(searched_obj)
except:
traceback.print_exc()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Exception raised while trying to get_referrers.',
additionalInXml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
if DEBUG:
sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
curr_frame = sys._getframe()
frame_type = type(curr_frame)
#Ignore this frame and any caller frame of this frame
ignore_frames = {} #Should be a set, but it's not available on all python versions.
while curr_frame is not None:
if basename(curr_frame.f_code.co_filename).startswith('pydev'):
ignore_frames[curr_frame] = 1
curr_frame = curr_frame.f_back
ret = ['<xml>\n']
ret.append('<for>\n')
if DEBUG:
sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Referrers of obj with id="%s"' % (obj_id,)))
ret.append('</for>\n')
all_objects = None
for r in referrers:
try:
if r in ignore_frames:
continue #Skip the references we may add ourselves
except:
pass #Ok: unhashable type checked...
if r is referrers:
continue
r_type = type(r)
r_id = str(id(r))
representation = str(r_type)
found_as = ''
if r_type == frame_type:
if DEBUG:
sys.stderr.write('Found frame referrer: %r\n' % (r,))
for key, val in r.f_locals.items():
if val is searched_obj:
found_as = key
break
elif r_type == dict:
if DEBUG:
sys.stderr.write('Found dict referrer: %r\n' % (r,))
# Try to check if it's a value in the dict (and under which key it was found)
for key, val in r.items():
if val is searched_obj:
found_as = key
if DEBUG:
sys.stderr.write(' Found as %r in dict\n' % (found_as,))
break
#Ok, there's one annoying thing: many times we find it in a dict from an instance,
#but with this we don't directly have the class, only the dict, so, to workaround that
#we iterate over all reachable objects ad check if one of those has the given dict.
if all_objects is None:
all_objects = gc.get_objects()
for x in all_objects:
try:
if getattr(x, '__dict__', None) is r:
r = x
r_type = type(x)
r_id = str(id(r))
representation = str(r_type)
break
except:
pass #Just ignore any error here (i.e.: ReferenceError, etc.)
elif r_type in (tuple, list):
if DEBUG:
sys.stderr.write('Found tuple referrer: %r\n' % (r,))
#Don't use enumerate() because not all Python versions have it.
i = 0
for x in r:
if x is searched_obj:
found_as = '%s[%s]' % (r_type.__name__, i)
if DEBUG:
sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
break
i += 1
if found_as:
if not isinstance(found_as, str):
found_as = str(found_as)
found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
ret.append(pydevd_xml.var_to_xml(
r,
representation,
additionalInXml=' id="%s"%s' % (r_id, found_as)))
finally:
if DEBUG:
sys.stderr.write('Done searching for references.\n')
#If we have any exceptions, don't keep dangling references from this frame to any of our objects.
all_objects = None
referrers = None
searched_obj = None
r = None
|
dyn888/youtube-dl
|
youtube_dl/extractor/aenetworks.py
|
Python
|
unlicense
| 2,401
| 0.002082
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import smuggle_url
class AENetworksIE(InfoExtractor):
IE_NAME = 'aenetworks'
IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network'
_VALID_URL = r'https?://(?:www\.)?(?:(?:history|aetv|mylifetime)\.com|fyi\.tv)/(?:[^/]+/)+(?P<id>[^/]+?)(?:$|[?#])'
_TESTS = [{
'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
'info_dict': {
'id': 'g12m5Gyt3fdR',
'ext': 'mp4',
'title': "Bet You Didn't Know: Valentine's Day",
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
'expected_warnings': ['JSON-LD'],
}, {
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
'info_dict': {
'id': 'eg47EERs_JsZ',
'ext': 'mp4',
'title': "Winter Is Coming",
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.aetv.com/shows/duck-dynasty/video/inlawful-entry',
'only_matching': True
}, {
        'url': 'http://www.fyi.tv/shows/tiny-house-nation/videos/207-sq-ft-minnesota-prairie-cottage',
'only_matching': True
}, {
        'url': 'http://www.mylifetime.com/shows/project-runway-junior/video/season-1/episode-6/superstar-clients',
'only_matching': True
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url_re = [
r'data-href="[^"]*/%s"[^>]+data-release-url="([^"]+)"' % video_id,
r"media_url\s*=\s*'([^']+)'"
]
video_url = self._search_regex(video_url_re, webpage, 'video url')
info = self._search_json_ld(webpage, video_id, fatal=False)
info.update({
'_type': 'url_transparent',
'url': smuggle_url(video_url, {'sig': {'key': 'crazyjava', 'secret': 's3cr3t'}}),
})
return info
|
thiagopena/PySIGNFe
|
pysignfe/nfse/bhiss/v10/ConsultarSituacaoLoteRps.py
|
Python
|
lgpl-2.1
| 5,547
| 0.010817
|
# -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
from .Rps import IdentificacaoPrestador, IdentificacaoRps
import os
DIRNAME = os.path.dirname(__file__)
class MensagemRetorno(XMLNFe):
def __init__(self):
super(MensagemRetorno, self).__init__()
self.Codigo = TagCaracter(nome=u'Codigo', tamanho=[1, 4], raiz=u'/[nfse]')
self.Mensagem = TagCaracter(nome=u'Mensagem', tamanho=[1, 200], raiz=u'/[nfse]')
self.Correcao = TagCaracter(nome=u'Correcao', tamanho=[0, 200], raiz=u'/[nfse]')
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<MensagemRetorno>'
xml += self.Codigo.xml
xml += self.Mensagem.xml
xml += self.Correcao.xml
xml += u'</MensagemRetorno>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Codigo.xml = arquivo
self.Mensagem.xml = arquivo
self.Correcao.xml = arquivo
xml = property(get_xml, set_xml)
class MensagemRetornoLote(XMLNFe):
def __init__(self):
super(MensagemRetornoLote, self).__init__()
self.IdentificacaoRps = IdentificacaoRps()
self.Codigo = TagCaracter(nome=u'Codigo', tamanho=[1, 4], raiz=u'/[nfse]')
self.Mensagem = TagCaracter(nome=u'Mensagem', tamanho=[1, 200], raiz=u'/[nfse]')
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<MensagemRetornoLote>'
xml += self.IdentificacaoRps.xml
xml += self.Codigo.xml
xml += self.Mensagem.xml
xml += u'</MensagemRetornoLote>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.IdentificacaoRps.xml = arquivo
self.Codigo.xml = arquivo
self.Mensagem.xml = arquivo
xml = property(get_xml, set_xml)
class ListaMensagemRetornoLote(XMLNFe):
def __init__(self):
super(ListaMensagemRetornoLote, self).__init__()
self.MensagemRetornoLote = []
    def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ListaMensagemRetornoLote>'
for m in self.MensagemRetornoLote:
xml += tira_abertura(m.xml)
xml += u'</ListaMensagemRetornoLote>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
            self.MensagemRetornoLote = self.le_grupo('[nfse]//ListaMensagemRetornoLote/MensagemRetornoLote', MensagemRetornoLote)
xml = property(get_xml, set_xml)
class ListaMensagemRetorno(XMLNFe):
def __init__(self):
super(ListaMensagemRetorno, self).__init__()
self.MensagemRetorno = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ListaMensagemRetorno>'
for m in self.MensagemRetorno:
xml += tira_abertura(m.xml)
xml += u'</ListaMensagemRetorno>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.MensagemRetorno = self.le_grupo('[nfse]//ListaMensagemRetorno/MensagemRetorno', MensagemRetorno)
xml = property(get_xml, set_xml)
class ConsultarSituacaoLoteRpsEnvio(XMLNFe):
def __init__(self):
super(ConsultarSituacaoLoteRpsEnvio, self).__init__()
self.versao = TagDecimal(nome=u'ConsultarSituacaoLoteRpsEnvio', propriedade=u'versao', namespace=NAMESPACE_NFSE, valor=u'1.00', raiz=u'/')
self.Prestador = IdentificacaoPrestador()
self.Protocolo = TagCaracter(nome=u'Protocolo', tamanho=[ 1, 50], raiz=u'/')
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarSituacaoLoteRpsEnvio xmlns="'+ NAMESPACE_NFSE + '">'
xml += self.Prestador.xml.replace(ABERTURA, u'')
xml += self.Protocolo.xml
xml += u'</ConsultarSituacaoLoteRpsEnvio>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Prestador.xml = arquivo
self.Protocolo.xml = arquivo
xml = property(get_xml, set_xml)
class ConsultarSituacaoLoteRpsResposta(XMLNFe):
def __init__(self):
super(ConsultarSituacaoLoteRpsResposta, self).__init__()
self.NumeroLote = TagInteiro(nome=u'NumeroLote', tamanho=[1, 15], raiz=u'/')
self.Situacao = TagInteiro(nome=u'Situacao', tamanho=[1, 1], raiz=u'/')
self.ListaMensagemRetorno = ListaMensagemRetorno()
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarSituacaoLoteRpsResposta xmlns="'+ NAMESPACE_NFSE + '">'
xml += self.NumeroLote.xml
xml += self.Situacao.xml
xml += self.ListaMensagemRetorno.xml.replace(ABERTURA, u'')
xml += u'</ConsultarSituacaoLoteRpsResposta>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.NumeroLote.xml = arquivo
self.Situacao.xml = arquivo
self.ListaMensagemRetorno.xml = arquivo
xml = property(get_xml, set_xml)
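# Illustration (added; not part of the original module): with Prestador and
# Protocolo filled in, ConsultarSituacaoLoteRpsEnvio.get_xml() produces a
# payload shaped like
#   <ConsultarSituacaoLoteRpsEnvio xmlns="...">
#     <Prestador>...</Prestador><Protocolo>...</Protocolo>
#   </ConsultarSituacaoLoteRpsEnvio>
# and set_xml() parses the same shape back into the tag objects.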
|
max00xam/service.maxxam.teamwatch
|
lib/engineio/client.py
|
Python
|
gpl-3.0
| 25,302
| 0.00004
|
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if client.is_asyncio_based():
client.start_background_task(client.disconnect, abort=True)
else:
client.disconnect(abort=True)
return original_signal_handler(sig, frame)
# original_signal_handler = signal.signal(signal.SIGINT, signal_handler)
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self, logger=False, json=None):
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = None
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = self.create_event()
self.queue = None
self.state = 'disconnected'
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
        :param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
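    # Illustration (added; not part of the original module): per the rules
    # documented above, eio.send('hello') is emitted as a text packet,
    # eio.send(b'\x01\x02', binary=True) as a binary packet, and list/dict
    # payloads such as eio.send({'a': 1}) are JSON-serialized first.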
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns a
|
aokolnychyi/spark
|
python/pyspark/shell.py
|
Python
|
apache-2.0
| 3,126
| 0.003199
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched as a PYTHONSTARTUP script.
"""
import atexit
import os
import platform
import warnings
import py4j
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, SQLContext
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
SparkContext._ensure_initialized()
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
spark = SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError:
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
spark = SparkSession.builder.getOrCreate()
except TypeError:
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
sql = spark.sql
atexit.register(lambda: sc.stop())
# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext
print("""Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version %s
/_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
print("SparkSession available as 'spark'.")
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
with open(_pythonstartup) as f:
code = compile(f.read(), _pythonstartup, 'exec')
exec(code)
|
elkeschaper/tral
|
tral/examples/example_workflow_MBE2014.py
|
Python
|
gpl-2.0
| 4,872
| 0.001643
|
'''
Implementation of the workflow used in :
Schaper,E. et al. (2014) Deep conservation of human protein tandem repeats within the eukaryotes. Molecular Biology and Evolution. 31, 1132–1148 .
'''
import logging
import logging.config
import os
from tral.paths import config_file, PACKAGE_DIRECTORY
from tral.sequence import sequence
from tral.hmm import hmm
logging.config.fileConfig(config_file("logging.ini"))
log = logging.getLogger('root')
TEST_FAA_FILE_MBE_2014 = os.path.join(
PACKAGE_DIRECTORY,
"test",
"P51610.fasta")
TEST_HMM_FILES_MBE_2014 = [
os.path.join(
PACKAGE_DIRECTORY,
"test",
"Kelch_1.hmm"),
os.path.join(
PACKAGE_DIRECTORY,
"test",
"Kelch_2.hmm")]
TEST_SCORE_MBE_2014 = "phylo_gap01_ignore_trailing_gaps_and_coherent_deletions"
def path():
"""Return the path to the test data files.
"""
return os.path.join(os.path.abspath('.'), 'tandemrepeats', 'test')
def sample_MBE_2014_pipeline():
# The Schaper et al. (MBE, 2014) pipeline is tested on a single sequence.
test_lSeq = sequence.Sequence.create(
os.path.join(
path(),
TEST_FAA_FILE_MBE_2014),
input_format="fasta")
test_seq = test_lSeq[0]
# Information on sequence domains (here: Pfam) in this sequence are added.
test_pfam_hmm = [
hmm.HMM.create(
input_format="hmmer",
file=os.path.join(
path(),
i)) for i in TEST_HMM_FILES_MBE_2014]
# The sequence is searched for tandem repetitions of the Pfam domain in
# the sequence
test_pfam_list = test_seq.detect(lHMM=test_pfam_hmm)
assert len(test_pfam_list.repeats) == 2
# Pfam TRs with n_effective < 3.5 are discarded.
test_pfam_list = test_pfam_list.filter("attribute", "n_effective", "min", 3.5)
assert len(test_pfam_list.repeats) == 2
    # de novo detection methods (Trust, T-reks, Xstream, HHrepID) are used to
    # search the sequence for tandem repeats.
    # INSERT OWN PARAMETERS USING: test_denovo_list = test_seq.detect(denovo =
# True, **TEST_DENOVO_PARAMETERS)
test_denovo_list = test_seq.detect(denovo=True)
# When Trust is part of the detectors, the number of found repeats may
# differ between runs...
    assert len(test_denovo_list.repeats) == 10
# De novo TRs with dTR_units (divergence) > 0.8; n_effective < 2.5; l < 10 or
# pvalue "phylo_gap01_ignore_trailing_gaps_and_coherent_deletions" > 0.01
# are discarded.
test_denovo_list = test_denovo_list.filter(
"pvalue",
TEST_SCORE_MBE_2014,
0.01)
assert len(test_denovo_list.repeats) == 10
test_denovo_list = test_denovo_list.filter(
"divergence",
TEST_SCORE_MBE_2014,
0.8)
assert len(test_denovo_list.repeats) == 10
test_denovo_list = test_denovo_list.filter("attribute", "n_effective", "min", 2.5)
assert len(test_denovo_list.repeats) == 5
test_denovo_list = test_denovo_list.filter("attribute", "l_effective", "min", 10)
assert len(test_denovo_list.repeats) == 2
# De novo TRs were remastered with HMM
test_denovo_hmm = [
hmm.HMM.create(
input_format='repeat',
repeat=iTR) for iTR in test_denovo_list.repeats]
test_denovo_list_remastered = test_seq.detect(lHMM=test_denovo_hmm)
assert len(test_denovo_list_remastered.repeats) == 2
# pvalue "phylo_gap01_ignore_trailing_gaps_and_coherent_deletions" > 0.1
# are discarded.
test_denovo_list_remastered = test_denovo_list_remastered.filter(
"pvalue",
TEST_SCORE_MBE_2014,
0.1)
# De novo TRs were filtered (n_effective < 3.5 are discarded.)
test_denovo_list_remastered = test_denovo_list_remastered.filter(
"attribute",
"n_effective",
"min",
3.5)
assert len(test_denovo_list_remastered.repeats) == 2
# De novo TRs overlapping with a Pfam TR were filtered
test_denovo_list_remastered = test_denovo_list_remastered.filter(
"none_overlapping_fixed_repeats",
test_pfam_list,
"shared_char")
assert len(test_denovo_list_remastered.repeats) == 2
# Remaining De novo TRs were clustered for overlap (common ancestry). Only best =
# lowest p-Value and lowest divergence were retained.
test_denovo_list_remastered = test_denovo_list_remastered.filter(
"none_overlapping", ["common_ancestry"], {
"pvalue": TEST_SCORE_MBE_2014, "divergence": TEST_SCORE_MBE_2014})
assert len(test_denovo_list_remastered.repeats) == 1
# Merge remaining set of de novo and Pfam TRs.
test_entire_set = test_pfam_list + test_denovo_list_remastered
assert len(test_entire_set.repeats) == 3
# Write result set of Pfam TRs
#test_entire_set.write(format = "tsv,...")
if __name__ == "__main__":
sample_MBE_2014_pipeline()
|
unioslo/cerebrum
|
Cerebrum/modules/event_publisher/event.py
|
Python
|
gpl-2.0
| 11,579
| 0
|
# encoding: utf-8
#
# Copyright 2017 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" An abstract event that can be stored in the database. """
from __future__ import absolute_import
import datetime
import itertools
import mx.DateTime
import pytz
import cereconf
class _VerbSingleton(type):
""" A metaclass that makes each EventType verb a singleton. """
verbs = {}
def __call__(cls, verb, *args):
if verb not in cls.verbs:
cls.verbs[verb] = super(_VerbSingleton, cls).__call__(verb, *args)
return cls.verbs[verb]
def get_verb(cls, verb):
return cls.verbs.get(verb)
class EventType(_VerbSingleton('EventTypeSingleton', (object,), {})):
"""Holds an event type."""
__slots__ = ['verb', 'description', ]
def __init__(self, verb, description):
""" Initialize EventType.
:verb: Scim verb
:description: HR description text
"""
self.verb = verb
self.description = description
def __repr__(self):
return '<{0.__class__.__name__!s} {0.verb}>'.format(self)
def __eq__(self, other):
"""Equality."""
return isinstance(other, EventType) and other.verb == self.verb
def __hash__(self):
"""Hash."""
return hash(self.verb)
# Define event types:
ADD = EventType('add', 'Add an object to subject')
CREATE = EventType('create', 'Create a new subject')
ACTIVATE = EventType('activate', 'Subject has no longer quarantines in system')
MODIFY = EventType('modify', 'Attributes has changed')
DEACTIVATE = EventType('deactivate', 'Quarantine is activated')
DELETE = EventType('delete', 'Subject is deleted')
REMOVE = EventType('remove', 'Remove an object from subject')
PASSWORD = EventType('password', 'Subject has changed password')
JOIN = EventType('join', 'Join two objects')
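# Illustration (added; not part of the original module): _VerbSingleton caches
# one EventType per verb, so re-constructing an existing verb returns the very
# same object and identity checks are reliable:
#   assert EventType('add', 'Add an object to subject') is ADD
#   assert EventType.get_verb('password') is PASSWORD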
class EntityRef(object):
""" Representation of a single entity.
The entity_id can be used internally to identify which object we reference
The entity_type and ident is used to generate a reference to the object
that other systems can use.
"""
__slots__ = ['ident', 'entity_type', 'entity_id', ]
def __init__(self, entity_id, entity_type, ident):
self.entity_id = int(entity_id)
self.entity_type = entity_type
self.ident = ident
def __repr__(self):
return ("<{0.__class__.__name__}"
" id={0.entity_id!r}"
" type={0.entity_type!r}"
" ident={0.ident!r}>").format(self)
def __eq__(self, other):
return (isinstance(other, EntityRef) and
self.entity_id == other.entity_id)
def to_dict(self):
return {
'ident': self.ident,
'entity_id': self.entity_id,
'entity_type': self.entity_type, }
class DateTimeDescriptor(object):
""" Datetime descriptor that handles timezones.
When setting the datetime, this method will try to localize it with the
default_timezone in the following ways:
- mx.DateTime.DateTimeType: Naive datetime, assume in default_timezone
- datetime.datetime: Assume in default_timezone if naive
- integer: Assume timestamp in UTC
The returned object will always be a localized datetime.datetime
"""
default_timezone = pytz.timezone(cereconf.TIMEZONE)
def __init__(self, slot):
""" Creates a new datetime descriptor.
:param str slot:
The attribute name where the actual value is stored.
"""
self.slot = slot
def __repr__(self):
return '{0.__class__.__name__}({0.slot!r})'.format(self)
def __get__(self, obj, cls=None):
if not obj:
return self
return getattr(obj, self.slot, None)
def __set__(self, obj, value):
if value is None:
self.__delete__(obj)
return
if isinstance(value, (int, long, )):
# UTC timestamp
value = pytz.utc.localize(
datetime.datetime.fromtimestamp(value))
elif isinstance(value, mx.DateTime.DateTimeType):
# Naive datetime in default_timezone
value = self.default_timezone.localize(value.pydatetime())
elif isinstance(value, datetime.datetime):
if value.tzinfo is None:
value = self.default_timezone.localize(value)
else:
raise TypeError('Invalid datetime {0} ({1})'.format(type(value),
repr(value)))
setattr(obj, self.slot, value)
def __delete__(self, obj):
if hasattr(obj, self.slot):
delattr(obj, self.slot)
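# Illustration (added; not part of the original module): with cereconf.TIMEZONE
# set to e.g. 'Europe/Oslo', assigning a naive datetime.datetime through the
# descriptor localizes it to that zone, an integer is read as a UTC timestamp,
# and reading the attribute back always yields a timezone-aware datetime.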
class Event(object):
""" Event abstraction.
Contains all the neccessary data to serialize an event.
"""
DEFAULT_TIMEZONE = 'Europe/Oslo'
__slots__ = ['event_type', 'subject', 'objects', 'context', 'attributes',
'_timestamp', '_scheduled', ]
timestamp = DateTimeDescriptor('_timestamp')
scheduled = DateTimeDescriptor('_scheduled')
def __init__(self, event_type,
subject=None,
objects=None,
context=None,
attributes=None,
timestamp=None,
scheduled=None):
"""
:param EventType event: the type of event
:param EntityRef subject: reference to the affected entity
:param list objects: sequence of other affected objects (EntityRef)
:param list context: sequence of affected systems (str)
:param list attributes: sequence of affected attributes (str)
:param datetime timestamp: when the event originated
:param datetime schedule: when the event should be issued
"""
self.event_type = event_type
self.subject = subject
self.timestamp = timestamp
self.scheduled = scheduled
self.objects = set(objects or [])
self.context = set(context or [])
self.attributes = set(attributes or [])
def __repr__(self):
return ('<{0.__class__.__name__}'
' event={0.event_type!r}'
' subject={0.subject!r}>').format(self)
def mergeable(self, other):
"""Can this event be merged with other."""
if self.scheduled is not None:
return False
if self.subject != other.subject:
return False
if self.event_type == CREATE:
return other.event_type not in (DEACTIVATE, REMOVE)
if self.event_type == DELETE:
return other.event_type in (REMOVE, DEACTIVATE, ADD, ACTIVATE,
MODIFY, PASSWORD)
if (self.event_type == other.event_type and
self.event_type in (ADD, REMOVE, ACTIVATE, DEACTIVATE)):
return True
if self.context != other.context:
return False
return True
def merge(self, other):
"""Merge messages."""
def ret_self():
self.objects.update(other.objects)
return [self]
if not self.mergeable(other):
return [self, other]
if self.event_type == CREATE:
if other.event_type == DELETE:
return []
if other.event_type == ADD:
self.context.update(other.context)
return ret_self()
if other.event_type == ACTIVATE:
return ret_self()
|
scattermagic/django-wizard-builder
|
wizard_builder/tests/urls.py
|
Python
|
bsd-3-clause
| 534
| 0
|
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
from .. import views
urlpatterns = [
url(r'^$',
views.NewWizardView.as_view(),
),
url(r'^new/$',
views.NewWizardView.as_view(),
name='wizard_new',
),
    url(r'^step/(?P<step>.+)/$',
views.WizardView.as_view(),
name='wizard_update',
),
url(r'^nested_admin/', include('nested_admin.urls')),
url(r'^admin/', admin.site.urls),
]
|
skylines-project/skylines
|
tests/api/views/about_test.py
|
Python
|
agpl-3.0
| 664
| 0.001506
|
def test_imprint(app, client):
app.config["SKYLINES_IMPRINT"] = u"foobar"
res = client.get("/imprint")
assert res.status_code == 200
assert res.json == {u"content": u"foobar"}
def test_team(client):
res = client.get("/team")
assert res.status_code == 200
content = res.json["content"]
assert "## Developers" in content
assert "* Tobias Bieniek (<tobias.bieniek@gmx.de> // maintainer)\n" in content
assert "## Developers" in content
def test_license(client):
res = client.get("/license")
    assert res.status_code == 200
content = res.json["content"]
assert "GNU AFFERO GENERAL PUBLIC LICENSE" in content
|
south-coast-science/scs_dfe_eng
|
tests/gas/isi/elc_dsi_t1_test.py
|
Python
|
mit
| 1,580
| 0
|
#!/usr/bin/env python3
"""
Created on 27 May 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import sys
import time
from scs_dfe.gas.isi.elc_dsi_t1 import ElcDSIt1
from scs_dfe.interface.interface_conf import InterfaceConf
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
controller = ElcDSIt1(ElcDSIt1.DEFAULT_ADDR)
print(controller)
# --------------------------------------------------------------------------------------------------------------------
try:
I2C.Sensors.open()
interface_conf = InterfaceConf.load(Host)
print(interface_conf)
    interface = interface_conf.interface()
print(interface)
interface.power_gases(True)
ident = controller.version_ident()
print("ident:[%s]" % ident)
tag = controller.version_tag()
print("tag:[%s]" % tag)
print("-")
for _ in range(5):
controller.start_conversion()
time.sleep(0.1)
c_wrk, c_aux = controller.read_conversion_voltage()
print('{"wrk": %f, "aux": %f}' % (c_wrk, c_aux))
sys.stdout.flush()
time.sleep(2.0)
print("-")
while True:
controller.start_conversion()
time.sleep(0.1)
v_wrk, v_aux = controller.read_conversion_voltage()
print('{"wrk": %0.5f, "aux": %0.5f}' % (v_wrk, v_aux))
sys.stdout.flush()
time.sleep(2.0)
except KeyboardInterrupt:
print("-")
finally:
I2C.Sensors.close()
|
audreyr/opencomparison
|
package/templatetags/package_tags.py
|
Python
|
mit
| 1,974
| 0.001013
|
from datetime import datetime, timedelta
from django import template
from package.models import Commit
from package.context_processors import used_packages_list
register = template.Library()
class ParticipantURLNode(template.Node):
def __init__(self, repo, participant):
self.repo = template.Variable(repo)
self.participant = template.Variable(participant)
def render(self, context):
repo = self.repo.resolve(context)
participant = self.participant.resolve(context)
if repo.user_url:
user_url = repo.user_url % participant
else:
user_url = '%s/%s' % (repo.url, participant)
return user_url
@register.tag
def participant_url(parser, token):
try:
tag_name, repo, participant = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires exactly two arguments" % token.contents.split()[0]
return ParticipantURLNode(repo, participant)
@register.filter
def commits_over_52(package):
now = datetime.now()
commits = Commit.objects.filter(
package=package,
commit_date__gt=now - timedelta(weeks=52),
).values_list('commit_date', flat=True)
weeks = [0] * 52
for cdate in commits:
age_weeks = (now - cdate).days // 7
if age_weeks < 52:
weeks[age_weeks] += 1
return ','.join(map(str, reversed(weeks)))
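# Illustration (added; not part of the original template library): the filter
# renders 52 comma-separated weekly counts, oldest week first; e.g. one commit
# three weeks ago and two this week ends the string with "...,1,0,0,2".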
@register.inclusion_tag('package/templatetags/_usage_button.html', takes_context=True)
def usage_button(context):
response = used_packages_list(context['request'])
response['STATIC_URL'] = context['STATIC_URL']
response['package'] = context['package']
if context['package'].pk in response['used_packages_list']:
response['usage_action'] = "remove"
response['image'] = "usage_triangle_filled"
else:
response['usage_action'] = "add"
response['image'] = "usage_triangle_hollow"
return response
|
yvesalexandre/bandicoot
|
bandicoot/helper/tools.py
|
Python
|
mit
| 8,941
| 0.000895
|
# The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import OrderedDict as NativeOrderedDict
from functools import update_wrapper
from datetime import timedelta
import itertools
import inspect
import json
import logging
import hashlib
import sys
import os
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
from bandicoot.core import User
if isinstance(obj, User):
return repr(obj)
return json.JSONEncoder.default(self, obj)
class OrderedDict(NativeOrderedDict):
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
# This solution is easy to implement but not robust
s = json.dumps(self, cls=CustomEncoder, indent=4)
transformations = [
('"<', '<'),
('>"', '>'),
('null', 'None')
]
for old_pattern, new_pattern in transformations:
s = s.replace(old_pattern, new_pattern)
return s
def _repr_pretty_(self, p, cycle):
p.text(self.__repr__())
def advanced_wrap(f, wrapper):
"""
Wrap a decorated function while keeping the same keyword arguments
"""
f_sig = list(inspect.getargspec(f))
wrap_sig = list(inspect.getargspec(wrapper))
# Update the keyword arguments of the wrapper
if f_sig[3] is None or f_sig[3] == []:
f_sig[3], f_kwargs = [], []
else:
f_kwargs = f_sig[0][-len(f_sig[3]):]
for key, default in zip(f_kwargs, f_sig[3]):
wrap_sig[0].append(key)
wrap_sig[3] = wrap_sig[3] + (default, )
wrap_sig[2] = None # Remove kwargs
src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
new_args = inspect.formatargspec(
wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs,
formatvalue=lambda x: '=' + x)
src += 'wrapper%s\n' % new_args
decorated = eval(src, locals())
decorated.func = f
return update_wrapper(decorated, f)
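# Hedged sketch (added; the indicator name below is hypothetical): given
#   def active_days(user, groupby='week'): ...
# and a generic wrapper(user, **kwargs), advanced_wrap(active_days, wrapper)
# returns a callable whose signature advertises groupby='week' instead of
# **kwargs, so introspection-based tooling keeps working on decorated
# indicators.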
class Colors:
"""
The `Colors` class stores six codes to color a string. It can be used to print
messages inside a terminal prompt.
Examples
--------
>>> print Colors.FAIL + "Error: it's a failure!" + Colors.ENDC
Attributes
----------
HEADER
Header color.
OKBLUE
Blue color for a success message
OKGREEN
Green color for a success message
WARNING
Warning color (yellow)
FAIL
Failing color (red)
ENDC
Code to disable coloring. Always add it after coloring a string.
"""
def __init__(self):
pass
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[32m'
WARNING = '\033[33m'
FAIL = '\033[31m'
ENDC = '\033[0m'
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
@classmethod
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
            except:
                # guess false in case of error
                return False
def write(self, text, color):
"""
Write the given text to the stream in the given color.
"""
color = self._colors[color]
self.stream.write('\x1b[{}m{}\x1b[0m'.format(color, text))
class ColorHandler(logging.StreamHandler):
def __init__(self, stream=sys.stderr):
super(ColorHandler, self).__init__(_AnsiColorizer(stream))
def emit(self, record):
msg_colors = {
logging.DEBUG: ("Debug", "green"),
logging.INFO: ("Info", "blue"),
logging.WARNING: ("Warning", "yellow"),
logging.ERROR: ("Error", "red")
}
        header, color = msg_colors.get(record.levelno, ("Info", "blue"))
if 'prefix' in record.__dict__:
header = record.prefix
else:
header = header + ':'
self.stream.write("{} {}\n".format(header, record.msg), color)
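# Illustration (added; not part of the original module): attach the handler
# with logging.getLogger('bandicoot').addHandler(ColorHandler()); a warning
# then prints as "Warning: <message>" colored per level (DEBUG green, INFO
# blue, WARNING yellow, ERROR red).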
def percent_records_missing_location(user, method=None):
"""
Return the percentage of records missing a location parameter.
"""
if len(user.records) == 0:
return 0.
missing_locations = sum([1 for record in user.records if record.position._get_location(user) is None])
return float(missing_locations) / len(user.records)
def percent_overlapping_calls(records, min_gab=300):
"""
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
"""
calls = [r for r in records if r.interaction == "call"]
if len(calls) == 0:
return 0.
overlapping_calls = 0
for i, r in enumerate(calls):
if i <= len(calls) - 2:
            if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime:
overlapping_calls += 1
return (float(overlapping_calls) / len(calls))
def antennas_missing_locations(user, Method=None):
"""
Return the number of antennas missing locations in the records of a given user.
"""
unique_antennas = set([record.position.antenna for record in user.records
                           if record.position.antenna is not None])
return sum([1 for antenna in unique_antennas if user.antennas.get(antenna) is None])
def pairwise(iterable):
"""
Returns pairs from an interator: s -> (s0,s1), (s1,s2), (s2, s3)...
"""
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
class AutoVivification(OrderedDict):
"""
Implementation of perl's autovivification feature.
Under CC-BY-SA 3.0 from nosklo on stackoverflow:
http://stackoverflow.com/questions/19189274/defaultdict-of-defaultdict-nested
"""
def __getitem__(self, item):
try:
return OrderedDict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def insert(self, keys, value):
if len(keys) == 1:
self[keys[0]] = value
        else:
            self[keys[0]].insert(keys[1:], value)
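# Illustration (added; not part of the original module): keys spring into
# existence on first access, so nested assignment needs no setup:
#   d = AutoVivification()
#   d['a']['b']['c'] = 42   # intermediate dicts are created automatically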
|
iamweilee/pylearn
|
cstringio-example-2.py
|
Python
|
mit
| 249
| 0.012048
|
'''
To make your code as fast as possible while staying compatible with older
versions of Python, use a small trick that falls back to the StringIO
module when cStringIO is unavailable, as shown in the example below.
'''
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
print StringIO
|
mstiri/p3270
|
setup.py
|
Python
|
gpl-3.0
| 1,698
| 0.014134
|
#from distutils.core import setup
import setuptools
setuptools.setup(
name = 'p3270',
packages = ['p3270'],
version = '0.1.3',
description = 'Python library to communicate with IBM hosts',
author = 'Mossaab Stiri',
author_email = 'mossaab.stiri@gmail.com',
url = 'https://github.com/mstiri/p3270',
long_description = '''
A Python library that provides an interface to communicate with IBM hosts: send commands and text, receive output (screens). The library provides the means to do what a human can do using a 3270 emulator.
The library is highly customizable and is built with simplicity in mind.
It is written in Python 3, runs on Linux and Unix-like Operating Systems, and relies on the `s3270` utility, so you need `s3270` installed on your system and available on your PATH.
The library allows you to open a telnet connection to an IBM host, and execute a set of instructions as you specified them in your python program.
''',
keywords = 'IBM CICS 3270 TN3270 test automation Mainframe z/OS',
classifiers = [
'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: Unix',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Development Status :: 4 - Beta'
]
)
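# Hedged usage sketch (added; class and method names follow the project's
# README rather than this setup script, so treat them as assumptions):
#   from p3270 import P3270Client
#   client = P3270Client(configFile='my.cfg')
#   client.connect()
#   client.sendText('user001')
#   client.sendEnter()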
|
Vaypron/ChromaPy
|
Example Scripts/Keypad/3. setbyRow.py
|
Python
|
mit
| 370
| 0
|
import ChromaPy32 as Chroma # Import the Chroma Module
from time import sleep
Keypad = Chroma.Keypad() # Initialize a new Keypad Instance
RED = (255, 0, 0) # Initialize a new color by RGB (RED,GREEN,BLUE)
Keypad.setbyRow(2, RED)  # sets the whole third row of the Keypad-Grid to red
Keypad.applyGrid()  # applies the Keypad-Grid to the connected Keypad
sleep(5)
|
utkbansal/tardis
|
tardis/plasma/tests/test_property_atomic.py
|
Python
|
bsd-3-clause
| 836
| 0.009569
|
import numpy as np
def test_levels_property(excitation_energy):
assert np.isclose(excitation_energy.ix[2].ix[0].ix[1], 3.17545416e-11)
def test_lines_property(lines):
assert np.isclose(lines.ix[564954]['wavelength'], 10833.307)
assert lines.index[124] == 564954
def test_lines_lower_level_index_property(lines_lower_level_index):
assert lines_lower_level_index[9] == 0
def test_lines_upper_level_index_property(lines_upper_level_index):
assert lines_upper_level_index[9] == 30
def test_atomic_mass_property(atomic_mass):
assert np.isclose(atomic_mass.ix[2], 6.6464755216973998e-24)
def test_ionization_data_property(ionization_data):
assert np.isclose(float(ionization_data.ix[2].ix[1]), 3.9393336e-11)
def test_zeta_data_property(zeta_data):
assert np.isclose(zeta_data.ix[2].ix[1][2000], 0.4012)
|
EwgOskol/python_training
|
model/group.py
|
Python
|
apache-2.0
| 683
| 0.002928
|
__author__ = 'tester'
from sys import maxsize
import re
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
        self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) \
and re.sub(r'\s+', ' ', self.name.rstrip()) == re.sub(r'\s+', ' ', other.name.rstrip())
def id_or_max(self):
if (self.id):
return int(self.id)
else:
return maxsize
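    # Illustration (added; not part of the original model): id_or_max works as
    # a sort key that pushes groups without a database id to the end:
    #   sorted(group_list, key=Group.id_or_max)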
|
benhoff/facebook_api_script
|
eye_aligner.py
|
Python
|
gpl-3.0
| 1,214
| 0.004942
|
import os
import cv2
from crop_pictures import CropFace
file_dir = os.path.dirname(os.path.realpath(__file__))
cropped_photos_dir = os.path.join(file_dir,
'cropped_photos',
'')
eye_cascade_filepath = os.path.join(file_dir, 'haarcascade_eye.xml')
eye_classifier = cv2.CascadeClassifier(eye_cascade_filepath)
all_cropped_photos = os.listdir(cropped_photos_dir)
eye_coord_list = []
for photo_filename in all_cropped_photos:
    image = cv2.imread(os.path.join(cropped_photos_dir, photo_filename))  # listdir yields bare filenames
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
eyes = eye_classifier.detectMultiScale(gray_image, 1.1, 2, 0|cv2.CASCADE_SCALE_IMAGE, (30, 30))
if len(eyes) > 2:
print('More eyes than you can shake a stick at!')
if len(eyes) < 2:
print('less eyes than you should have. Or pirate')
# NOTE: Assumes it returns in the same order, left to right
"""
for (x, y, w, h) in eyes:
center_eye = (int(x + w/2), int(y+h/2))
    eye_coord_list.append(center_eye)
"""
    if len(eyes) == 2:
        # derive eye centres from the (x, y, w, h) rects before cropping
        centers = [(int(x + w / 2), int(y + h / 2)) for (x, y, w, h) in eyes]
        resized_image = CropFace(image, centers[0], centers[1])
        resized_image.save(photo_filename)  # target filename assumed
# Now we want to align the images by the eyeballs
|
Tbear1981/bitcoin-overseer
|
files/webcrawler.py
|
Python
|
gpl-3.0
| 705
| 0.009929
|
import requests
from bs4 import BeautifulSoup
def trade_spider(max_pages):
page = 1
while page <= max_pages:
url = "https://thenewboston.com/videos.php?cat=98&video=20144" #+ str(page)
        source_code = requests.get(url)  # the module is imported as `requests`
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
for link in soup.findAll("a", {"class": "itemname"}):
href = link.get("href")
print("href")
trade_spider(1)
def get_single_item_data(item_url):
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
for item_name in soup.findAll("a", {"class": "i-name"}):
        print(item_name.string)
|
f3at/feat
|
src/feat/test/integration/test_simulation_graph.py
|
Python
|
gpl-2.0
| 19,012
| 0.000263
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
import uuid
from feat.test.integration import common
from feat.agents.shard import shard_agent
from feat.agents.application import feat
from feat.common import defer
from feat.common.text_helper import format_block
from feat.test.common import attr
from feat.agencies.recipient import IRecipient, dummy_agent
from feat.agents.base.partners import FindPartnerError
class CommonMixin(object):
def partners_of(self, agent):
return set(map(lambda x: x.recipient.route,
agent.query_partners('neighbours')))
def shard_of(self, agent):
self.assertIsInstance(agent, shard_agent.ShardAgent)
return agent.get_shard_id()
@defer.inlineCallbacks
def query_partners(self, agent):
'''
Generator returning the ShardAgent instances of partners being
neighbours of the given ShardAgent.
'''
result = []
for p in agent.query_partners('neighbours'):
ag = yield self.driver.find_agent(p.recipient.key)
result.append(ag.get_agent())
defer.returnValue(result)
def _get_exp(self, *numbers, **kwargs):
res = dict()
res['kings'] = kwargs.get('kings', 0)
for num, index in zip(numbers, range(len(numbers))):
res[index] = num
return res
@defer.inlineCallbacks
def check_structure(self, expected):
expected_kings = expected.pop('kings')
seen_kings = 0
seen = dict(map(lambda x: (x, 0, ), expected.keys()))
for medium in self.driver.iter_agents('shard_agent'):
agent = medium.get_agent()
if agent.is_king():
seen_kings += 1
partners = self.partners_of(agent)
seen[len(partners)] += 1
our_shard = self.shard_of(agent)
# check for self-partnership
self.assertTrue(our_shard not in partners)
            # check for symmetry of partnership
partners = yield self.query_partners(agent)
for partner in partners:
self.assertTrue(our_shard in self.partners_of(partner))
for expectation, value in expected.iteritems():
self.assertEqual(value, seen[expectation],
"Expected %d shard with %d partners, got %d. "
"This happened while having %d agents in "
"total." % (
value, expectation, seen[expectation],
self.count_agents()))
self.assertEqual(expected_kings, seen_kings,
"Expected the graph to have %d kings, %d seen. "
"This happened while having %d agents in total."
|
% (
expected_kings, seen_kings, self.count_agents()))
@defer.inlineCallbacks
def start_host(self, join_shard=True):
script = format_block("""
            desc = descriptor_factory('host_agent')
agency = spawn_agency(start_host=False)
medium = agency.start_agent(desc, run_startup=False)
agent = medium.get_agent()
""")
yield self.process(script)
agent = self.get_local('agent')
if join_shard:
yield agent.start_join_shard_manager()
yield self.wait_for_idle(20)
defer.returnValue(agent)
@attr(timescale=0.05)
class DivorceSimulation(common.SimulationTest, CommonMixin):
timeout = 20
@defer.inlineCallbacks
def prolog(self):
script = format_block("""
agency = spawn_agency()
agency.start_agent(descriptor_factory('shard_agent', shard=uuid()), \
run_startup=False)
agent1 = _.get_agent()
agency = spawn_agency()
agency.start_agent(descriptor_factory('shard_agent', shard=uuid()), \
run_startup=False)
agent2 = _.get_agent()
agency = spawn_agency()
agency.start_agent(descriptor_factory('shard_agent', shard=uuid()), \
run_startup=False)
agent3 = _.get_agent()
""")
yield self.process(script)
self.agent1, self.agent2, self.agent3 =\
self.get_local('agent1', 'agent2', 'agent3')
self.alloc = list()
for x in range(2):
alloc = yield self.agent3.allocate_resource(neighbours=1)
self.alloc.append(alloc.id)
def assert_partners(self, agent, p_list):
s_list = map(lambda x: self.shard_of(x), p_list)
self.assertEqual(set(s_list), self.partners_of(agent))
_, alloc = agent.list_resource()
self.assertEqual(len(p_list), alloc['neighbours'])
@defer.inlineCallbacks
def test_simple_divorce(self):
# establish partnership agent1 -> agent2
yield self.agent1.propose_to(IRecipient(self.agent2))
self.assert_partners(self.agent1, (self.agent2, ))
self.assert_partners(self.agent2, (self.agent1, ))
self.assertEqual(set([self.shard_of(self.agent1)]),
self.partners_of(self.agent2))
# now put agent3 in the middle
yield self.agent1.divorce_action(IRecipient(self.agent2),
IRecipient(self.agent3),
self.alloc)
self.assert_partners(self.agent2, (self.agent3, ))
self.assert_partners(self.agent1, (self.agent3, ))
self.assert_partners(self.agent3, (self.agent1, self.agent2))
@defer.inlineCallbacks
def test_divorce_divorcee_is_a_partner(self):
# establish partnership agent1 -> agent2
yield self.agent1.propose_to(IRecipient(self.agent2))
self.assert_partners(self.agent1, (self.agent2, ))
self.assert_partners(self.agent2, (self.agent1, ))
# establish partnership agent2 -> agent3
yield self.agent2.propose_to(IRecipient(self.agent3))
self.assert_partners(self.agent1, (self.agent2, ))
self.assert_partners(self.agent2, (self.agent1, self.agent3))
# now try to put agent3 in the middle between agent1 and agent2
alloc, _ = self.agent3.list_resource()
self.assertEqual(3, alloc['neighbours'])
yield self.agent1.divorce_action(IRecipient(self.agent2),
IRecipient(self.agent3),
self.alloc)
self.assert_partners(self.agent1, (self.agent3, ))
self.assert_partners(self.agent2, (self.agent3, ))
self.assert_partners(self.agent3, (self.agent1, self.agent2))
@defer.inlineCallbacks
def test_divorce_divorcer_is_a_partner(self):
# establish partnership agent1 -> agent3
yield self.agent1.propose_to(IRecipient(self.agent3))
self.assert_partners(self.agent1, (self.agent3, ))
# establish partnership agent1 -> agent2
yield self.agent2.propose_to(IRecipient(self.agent1))
self.assert_partners(self.agent1, (self.agent3, self.agent2))
# now try to put agent3 in the middle between agent1 and agent2
yield self.agent1.divorce_action(IRecipient(self.agent2),
IRecipient(self.agent3),
                                         self.alloc)
|
smartcrib/password-scrambler-ws
|
ScramblerTools/sc_initkey.py
|
Python
|
gpl-3.0
| 1,190
| 0.020168
|
#!/usr/bin/python
"""sc_initkey.py: utility script forS-CRIB Scramble device to format initialisation key
it requires input string of 40 hex characters - project sCribManager - Python."""
'''
@author: Dan Cvrcek
@copyright: Copyright 2013-14, Smart Crib Ltd
@credits: Dan Cvrcek
@license: GPL version 3 (e.g., https://www.gnu.org/copyleft/gpl.html)
@version: 1.0
@email: info@s-crib.com
@status: Test
'''
import hashlib
import sys
import binascii
if __name__ == "__main__":
args = len(sys.argv)-1
if args==1:
data = sys.argv[1]
bindata = binascii.unhexlify(data)
hash_obj = hashlib.sha1()
hash_obj.update(bindata)
crc = hash_obj.hexdigest()[:4]
prefix = ''
byte1 = ord(bindata[1])
for i in range(4):
prefix = chr(0x31+(byte1&0x3)) + prefix
byte1 = byte1 / 4
byte0 = ord(bindata[0])
for i in range(4):
prefix = chr(0x31+(byte0&0x3)) + prefi
|
x
byte0 = byte0 / 4
initkey0 = prefix + data[4:] + crc
initkey1 = initkey0.upper()
print(initkey1)
else:
print("This script must be callled with exactly one argument - 40 characters long hex string")
|
marco-lancini/Showcase
|
settings_private.py
|
Python
|
mit
| 984
| 0.012195
|
#
# PRIVATE DATA
#
SECRET_KEY = ''
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
DEFAULT_FROM_EMAIL = ""
#
# SOCIAL
#
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
LINKEDIN_CONSUMER_KEY = ''
LINKEDIN_CONSUMER_SECRET = ''
FLICKR_APP_ID = ''
FLICKR_API_SECRET = ''
TUMBLR_CONSUMER_KEY = ''
TUMBLR_CONSUMER_SECRET = ''
# GITHUB_APP_ID = ''
# GITHUB_API_SECRET = ''
# FOURSQUARE_CONSUMER_KEY = ''
# FOURSQUARE_CONSUMER_SECRET = ''
# DROPBOX_APP_ID = ''
# DROPBOX_API_SECRET = ''
# INSTAGRAM_CLIENT_ID = ''
# INSTAGRAM_CLIENT_SECRET = ''
# YAHOO_CONSUMER_KEY = ''
# YAHOO_CONSUMER_SECRET = ''
# GOOGLE_OAUTH2_CLIENT_ID = ''
# GOOGLE_OAUTH2_CLIENT_SECRET = ''
|
mmanhertz/elopic
|
elopic/ui/central_widget.py
|
Python
|
bsd-2-clause
| 1,328
| 0
|
from PySide import QtGui
from PySide.QtCore import Signal
from elo_button_row import EloButtonRow
from picture_area import PictureArea
class CentralWidget(QtGui.QWidget):
left_chosen = Signal()
right_chosen = Signal()
left_deleted = Signal()
right_deleted = Signal()
    def __init__(self, left_image_path, right_image_path, parent=None):
super(CentralWidget, self).__init__(parent=parent)
self._init_ui(left_image_path, right_image_path)
self._init_signals()
    def _init_ui(self, left_image_path, right_image_path):
vbox = QtGui.QVBoxLayout(self)
self.pic_area = PictureArea(
left_image_path,
right_image_path,
parent=self
)
self.buttons = EloButtonRow(parent=self)
vbox.addWidget(self.pic_area, stretch=100)
vbox.addWidget(self.buttons, stretch=1)
self.setLayout(vbox)
def _init_signals(self):
self.buttons.left_deleted.connect(self.left_deleted)
self.buttons.left_chosen.connect(self.left_chosen)
self.buttons.right_chosen.connect(self.right_chosen)
self.buttons.right_deleted.connect(self.right_deleted)
def change_pictures(self, left_image_path, right_image_path):
self.pic_area.change_pictures(left_image_path, right_image_path)
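# Usage sketch (hypothetical image paths; assumes a running Qt event loop):
#   app = QtGui.QApplication([])
#   w = CentralWidget('left.jpg', 'right.jpg')
#   w.left_chosen.connect(lambda: w.change_pictures('l2.jpg', 'r2.jpg'))
#   w.show(); app.exec_()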
|
Tactique/common
|
database/sql_scripts/seeders/template.py
|
Python
|
mit
| 1,052
| 0.003802
|
import os
import csv
import json
from tables.templates import (
ResponseTemplate
)
from seeders.base_seeder import BaseSeeder, print_delete_count
class TemplateSeeder(BaseSeeder):
def __init__(self, session):
BaseSeeder.__init__(self, session)
self.template_data = os.path.join(self.database_dir, 'templates')
def seed(self):
self.clear_templates()
for response_path in os.listdir(self.template_data):
with open(os.path.join(self.template_data, response_path), 'r') as file_:
templates = json.loads(file_.read())
for template in templates:
print("Adding template for response %s" % template)
JSONstr = json.dumps(templates[template])
new_template = ResponseTemplate(name=template, json=JSONstr)
self.session.add(new_template)
def clear_templates(self):
print("Clearing all template tables")
        print_delete_count(self.session.query(ResponseTemplate).delete())
|
katemsu/kate_website
|
kate3/utils/bitly.py
|
Python
|
mit
| 6,991
| 0.013446
|
#!/usr/bin/python2.4
#
# Copyright 2009 Empeeric LTD. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import simplejson
import urllib,urllib2
import urlparse
import string
BITLY_BASE_URL = "http://api.bit.ly/"
BITLY_API_VERSION = "2.0.1"
VERBS_PARAM = {
'shorten':'longUrl',
'expand':'shortUrl',
'info':'shortUrl',
'stats':'shortUrl',
'errors':'',
}
class BitlyError(Exception):
'''Base class for bitly errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Api(object):
""" API class for bit.ly """
def __init__(self, login, apikey):
self.login = login
self.apikey = apikey
self._urllib = urllib2
def shorten(self,longURL):
"""
Takes either:
A long URL string and returns shortened URL string
Or a list of long URL strings and returns a list of shortened URL strings.
"""
if not isinstance(longURL, list):
longURL = [longURL]
for index,url in enumerate(longURL):
if not '://' in url:
longURL[index] = "http://" + url
request = self._getURL("shorten",longURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
res = []
for item in json['results'].values():
if item['shortKeywordUrl'] == "":
res.append(item['shortUrl'])
else:
res.append(item['shortKeywordUrl'])
if len(res) == 1:
return res[0]
else:
return res
def expand(self,shortURL):
""" Given a bit.ly url or hash, return long source url """
request = self._getURL("expand",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]['longUrl']
def info(self,shortURL):
"""
Given a bit.ly url or hash,
return information about that page,
such as the long source url
"""
request = self._getURL("info",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]
def stats(self,shortURL):
""" Given a bit.ly url or hash, return traffic and referrer data. """
request = self._getURL("stats",shortURL)
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return Stats.NewFromJsonDict(json['results'])
def errors(self):
""" Get a list of bit.ly API error codes. """
request = self._getURL("errors","")
result = self._fetchUrl(request)
json = simplejson.loads(result)
self._CheckForError(json)
return json['results']
def setUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib: an instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def _getURL(self,verb,paramVal):
if not isinstance(paramVal, list):
paramVal = [paramVal]
params = [
('version',BITLY_API_VERSION),
('format','json'),
('login',self.login),
('apiKey',self.apikey),
]
verbParam = VERBS_PARAM[verb]
if verbParam:
for val in paramVal:
params.append(( verbParam,val ))
encoded_params = urllib.urlencode(params)
return "%s%s?%s" % (BITLY_BASE_URL,verb,encoded_params)
def _fetchUrl(self,url):
'''Fetch a URL
Args:
url: The URL to retrieve
Returns:
A string containing the body of the response.
'''
# Open and return the URL
url_data = self._urllib.urlopen(url).read()
return url_data
def _CheckForError(self, data):
"""Raises a BitlyError if bitly returns an error message.
Args:
data: A python dict created from the bitly json response
Raises:
BitlyError wrapping the bitly error message if one exists.
"""
# bitly errors are relatively unlikely, so it is faster
        # to check first, rather than try and catch the exception
if 'ERROR' in data or data['statusCode'] == 'ERROR':
raise BitlyError, data['errorMessage']
for key in data['results']:
if type(data['results']) is dict and type(data['results'][key]) is dict:
if 'statusCode' in data['results'][key] and data['results'][key]['statusCode'] == 'ERROR':
raise BitlyError, data['results'][key]['errorMessage']
class Stats(object):
'''A class representing the Statistics returned by the bitly api.
The Stats structure exposes the following properties:
status.user_clicks # read only
status.clicks # read only
'''
def __init__(self,user_clicks=None,total_clicks=None):
self.user_clicks = user_clicks
self.total_clicks = total_clicks
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the bitly API
Returns:
A bitly.Stats instance
'''
return Stats(user_clicks=data.get('userClicks', None),
total_clicks=data.get('clicks', None))
if __name__ == '__main__':
testURL1="www.yahoo.com"
testURL2="www.cnn.com"
a=Api(login="pythonbitly",apikey="R_06871db6b7fd31a4242709acaf1b6648")
short=a.shorten(testURL1)
print "Short URL = %s" % short
urlList=[testURL1,testURL2]
shortList=a.shorten(urlList)
print "Short URL list = %s" % shortList
long=a.expand(short)
print "Expanded URL = %s" % long
info=a.info(short)
print "Info: %s" % info
stats=a.stats(short)
print "User clicks %s, total clicks: %s" % (stats.user_clicks,stats.total_clicks)
errors=a.errors()
print "Errors: %s" % errors
|
stefanklug/psd-tools
|
tests/test_info.py
|
Python
|
mit
| 1,195
| 0.005887
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from psd_tools import PSDImage
from psd_tools.constants import TaggedBlock, SectionDivider, BlendMode
from .utils import decode_psd
def test_1layer_name():
psd = decode_psd('1layer.psd')
layers = psd.layer_and_mask_data.layers.layer_records
assert len(layers) == 1
layer = layers[0]
assert len(layer.tagged_blocks) == 1
block = layer.tagged_blocks[0]
assert block.key == TaggedBlock.UNICODE_LAYER_NAME
assert block.data == 'Фон'
def test_groups():
psd = decode_psd('group.psd')
layers = psd.layer_and_mask_data.layers.layer_records
assert len(layers) == 3+1 # 3 layers + 1 divider
assert layers[1].tagged_blocks[3].key == TaggedBlock.SECTION_DIVIDER_SETTING
assert layers[1].tagged_blocks[3].data.type == SectionDivider.BOUNDING_SECTION_DIVIDER
def test_api():
image = PSDImage(decode_psd('1layer.psd'))
assert len(image.layers) == 1
layer = image.layers[0]
assert layer.name == 'Фон'
assert layer.bbox == (0, 0, 101, 55)
assert layer.visible
assert layer.opacity == 255
assert layer.blend_mode == BlendMode.NORMAL
|
elifesciences/elife-tools
|
tests/fixtures/test_media/content_07_expected.py
|
Python
|
mit
| 7,974
| 0
|
# output from elife04493.xml
expected = [
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v001.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.007",
"type": "media",
"sibling_ordinal": 1,
"position": 1,
"ordinal": 1,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v002.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.010",
"type": "media",
"sibling_ordinal": 2,
"position": 2,
"ordinal": 2,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v003.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.011",
"type": "media",
"sibling_ordinal": 3,
"position": 3,
"ordinal": 3,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v004.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.012",
"type": "media",
"sibling_or
|
dinal": 4,
"position": 4,
"ordinal": 4,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife
|
04493v005.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.013",
"type": "media",
"sibling_ordinal": 5,
"position": 5,
"ordinal": 5,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v006.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.017",
"type": "media",
"sibling_ordinal": 6,
"position": 6,
"ordinal": 6,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v007.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.018",
"type": "media",
"sibling_ordinal": 7,
"position": 7,
"ordinal": 7,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v008.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.019",
"type": "media",
"sibling_ordinal": 8,
"position": 8,
"ordinal": 8,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v009.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.020",
"type": "media",
"sibling_ordinal": 9,
"position": 9,
"ordinal": 9,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v010.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.021",
"type": "media",
"sibling_ordinal": 10,
"position": 10,
"ordinal": 10,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v011.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.022",
"type": "media",
"sibling_ordinal": 11,
"position": 11,
"ordinal": 11,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v012.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.023",
"type": "media",
"sibling_ordinal": 12,
"position": 12,
"ordinal": 12,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v013.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.024",
"type": "media",
"sibling_ordinal": 13,
"position": 13,
"ordinal": 13,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v014.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.025",
"type": "media",
"sibling_ordinal": 14,
"position": 14,
"ordinal": 14,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v015.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.026",
"type": "media",
"sibling_ordinal": 15,
"position": 15,
"ordinal": 15,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v016.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.027",
"type": "media",
"sibling_ordinal": 16,
"position": 16,
"ordinal": 16,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v017.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.031",
"type": "media",
"sibling_ordinal": 17,
"position": 17,
"ordinal": 17,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v018.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.032",
"type": "media",
"sibling_ordinal": 18,
"position": 18,
"ordinal": 18,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v019.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.033",
"type": "media",
"sibling_ordinal": 19,
"position": 19,
"ordinal": 19,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v020.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.034",
"type": "media",
"sibling_ordinal": 20,
"position": 20,
"ordinal": 20,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v021.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.035",
"type": "media",
"sibling_ordinal": 21,
"position": 21,
"ordinal": 21,
},
{
"mime-subtype": "mov",
"mimetype": "video",
"xlink_href": "elife04493v022.mov",
"content-type": "glencoe play-in-place height-250 width-310",
"component_doi": "10.7554/eLife.04493.036",
"type": "media",
"sibling_ordinal": 22,
"position": 22,
"ordinal": 22,
},
{
"mime-subtype": "docx",
"mimetype": "application",
"xlink_href": "elife04493s001.docx",
"type": "media",
"sibling_ordinal": 1,
"parent_type": "supplementary-material",
"parent_ordinal": 1,
"parent_asset": "supp",
"parent_sibling_ordinal": 1,
"parent_component_doi": "10.7554/eLife.04493.039",
"position": 23,
"ordinal": 23,
},
]
|
ardi69/pyload-0.4.10
|
pyload/plugin/crypter/FiredriveCom.py
|
Python
|
gpl-3.0
| 474
| 0.012658
|
# -*- coding: utf-8 -*-
from pyload.plugin.internal.DeadCrypter import DeadCrypter
class FiredriveCom(DeadCrypter):
__name = "FiredriveCom"
__type = "crypter"
__version = "0.03"
__pattern = r'https?://(?:www\.)?(firedrive|putlocker)\.com/share/.+'
    __config = []  #@TODO: Remove in 0.4.10
__description = """Firedrive.com folder decrypter plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "vuolter@gmail.com")]
|
varadarajan87/piernik
|
bin/generate_public.py
|
Python
|
gpl-3.0
| 1,873
| 0.003203
|
#!/usr/bin/env python
import qa
import re
import numpy
have_use = re.compile("^\s{1,12}use\s")
remove_warn = re.compile('''(?!.*QA_WARN .+)''', re.VERBOSE)
unwanted = re.compile("(\s|&|\n)", re.VERBOSE)
def do_magic(files, options):
name = files[0]
glob = []
temp = []
for f in files[2:]:
lines = open(f, 'r').readlines()
        temp = qa.remove_amp(filter(remove_warn.match, lines), True)
uses = [f for f in filter(have_use.search, temp) if (
re.match("\s{0,9}use " + name, f))]
for f in uses:
glob.extend(f.split("only: ")[1].strip().split(','))
return numpy.unique([unwanted.sub('', f) for f in glob])
def pretty_format(list, col):
print " public :: &"
str = " & "
for item in list:
if(len(str) + len(item) + 2 > int(col)):
print str + "&"
str = " & "
str = str + item + ", "
print str.rstrip(", ")
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: %prog module_name line_lenght FILES\n\nExample: bin/generate_public.py grid 140 $(find . -name \"*F90\")"
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="make lots of noise [default]")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose",
help="be vewwy quiet (I'm hunting wabbits)")
parser.add_option("-f", "--force",
action="store_true", dest="force",
help="commit despite errors (It will be logged)")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("incorrect number of arguments")
tab = do_magic(args, options)
pretty_format(tab, args[1])
|
zengxs667/tiblog
|
asterisk/admin/__init__.py
|
Python
|
mit
| 87
| 0.011494
|
from flask import Blueprint
admin = Blueprint("admin", __name__)
from . import views
|
botify-labs/moto
|
tests/test_sts/test_server.py
|
Python
|
apache-2.0
| 1,095
| 0
|
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
def test_sts_get_session_token():
backend = server.create_backend_app("sts")
test_client = backend.test_client()
res = test_client.get('/?Action=GetSessionToken')
res.status_code.should.equal(200)
res.data.should.contain(b"SessionToken")
res.data.should.contain(b"AccessKeyId")
def test_sts_get_federation_token():
backend = server.create_backend_app("sts")
test_client = backend.test_client()
res = test_client.get('/?Action=GetFederationToken&Name=Bob')
res.status_code.should.equal(200)
res.data.should.contain(b"SessionToken")
res.data.should.contain(b"AccessKeyId")
def test_sts_get_caller_identity():
backend = server.create_backend_app("sts")
test_client = backend.test_client()
res = test_client.get('/?Action=GetCallerIdentity')
res.status_code.should.equal(200)
res.data.should.contain(b"Arn")
res.data.should.contain(b"UserId")
res.data.should.contain(b"Account")
|
PW-Sat2/PWSat2OBC
|
integration_tests/telecommand/compile_info.py
|
Python
|
agpl-3.0
| 236
| 0.004237
|
from telecommand import Telecommand
class GetCompileInfoTelecommand(Telecommand):
def __init__(self):
Telecommand.__init__(self)
def apid(self):
return 0x27
def payload(self):
return []
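# Note (sketch): the empty payload means this telecommand frame carries only
# its header; the request is identified by the APID value 0x27 returned above.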
|
kapteyn-astro/kapteyn
|
doc/source/EXAMPLES/mu_externaldata.py
|
Python
|
bsd-3-clause
| 977
| 0.0174
|
from kapteyn import maputils
from matplotlib import pylab as plt
import numpy
header = {'NAXIS' : 2, 'NAXIS1': 800, 'NAXIS2': 800,
'CTYPE1' : 'RA---TAN',
'CRVAL1' : 0.0, 'CRPIX1' : 1, 'CUNIT1' : 'deg', 'CDELT1' : -0.05,
'CTYPE2' : 'DEC--TAN',
'CRVAL2' : 0.0, 'CRPIX2' : 1, 'CUNIT2' : 'deg', 'CDELT2' : 0.05,
}
nx = header['NAXIS1']
ny = header['NAXIS2']
sizex1 = nx/2.0; sizex2 = nx - sizex1
sizey1 = ny/2.0; sizey2 = ny - sizey1
x, y = numpy.mgrid[-sizex1:sizex2, -sizey1:sizey2]
edata = numpy.exp(-(x**2/float(sizex1*10)+y**2/float(sizey1*10)))
f = maputils.FITSimage(externalheader=header, externaldata=edata)
f.writetofits()
fig = plt.figure(figsize=(6,5))
frame = fig.add_axes([0.1,0.1, 0.82,0.82])
mplim = f.Annotatedimage(frame, cmap='pink')
mplim.Image()
gr = mplim.Graticule()
gr.setp_gratline(color='y')
mplim.plot()
mplim.interact_toolbarinfo()
mplim.interact_imagecolors()
mplim.interact_writepos()
plt.show()
|
oskarm91/sis
|
sis/settings/test.py
|
Python
|
bsd-3-clause
| 435
| 0
|
"""
This is an example settings/test.py file.
Use this settings file when running tests.
These settings overrides what's in settings/base.py
"""
from .base import *
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sql
|
ite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
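# Note: the special name ":memory:" keeps the sqlite test database entirely
# in RAM, so every test run starts from a fresh, fast database.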
SECRET_KEY = '*k3tkxu5a*08f9ann#5sn!3qc&o2nkr-+z)0=kmm7md9!z7=^k'
|
Azure/azure-sdk-for-python
|
sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_spark_job_definition_operations.py
|
Python
|
mit
| 45,127
| 0.005097
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_spark_job_definitions_by_workspace_request(
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
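# Note (sketch): every build_*_request helper in this module follows the same
# autogenerated pattern: pop 'api_version' from kwargs, fill in the URL
# template, then assemble query and header parameters into an azure.core
# HttpRequest with the appropriate HTTP method.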
def build_create_or_update_spark_job_definition_request_initial(
spark_job_definition_name: str,
*,
json: JSONType = None,
content: Any = None,
if_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions/{sparkJobDefinitionName}')
path_format_arguments = {
"sparkJobDefinitionName": _SERIALIZER.url("spark_job_definition_name", spark_job_definition_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str')
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_spark_job_definition_request(
spark_job_definition_name: str,
*,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions/{sparkJobDefinitionName}')
path_format_arguments = {
"sparkJobDefinitionName": _SERIALIZER.url("spark_job_definition_name", spark_job_definition_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if if_none_match is not None:
header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_spark_job_definition_request_initial(
spark_job_definition_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions/{sparkJobDefinitionName}')
path_format_arguments = {
"sparkJobDefinitionName": _SERIALIZER.url("spark_job_definition_name", spark_job_definition_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_execute_spark_job_definition_request_initial(
spark_job_definition_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions/{sparkJobDefinitionName}/execute')
path_format_arguments = {
"sparkJobDefinitionName": _SERIALIZER.url("spark_job_definition_name", spark_job_definition_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_rename_spark_job_definition_request_initial(
spark_job_definition_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-12-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/sparkJobDefinitions/{sparkJobDefinitionName}/rename')
path_format_arguments = {
"sparkJobDefinitionName": _SERIALIZER.url("spark_job_definition_name", spark_job_definition_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
|
abadger/Bento
|
bento/commands/hooks.py
|
Python
|
bsd-3-clause
| 3,935
| 0.00737
|
import os
import sys
import re
from bento.compat \
import \
inspect as compat_inspect
from bento.commands.core \
import \
command
SAFE_MODULE_NAME = re.compile("[^a-zA-Z_]")
__HOOK_REGISTRY = {}
__PRE_HOOK_REGISTRY = {}
__POST_HOOK_REGISTRY = {}
__COMMANDS_OVERRIDE = {}
__INIT_FUNCS = {}
def add_to_registry(func, category):
global __HOOK_REGISTRY
if not category in __HOOK_REGISTRY:
__HOOK_REGISTRY[category] = [func]
else:
__HOOK_REGISTRY[category].append(func)
def override_command(command, func):
global __COMMANDS_OVERRIDE
local_dir = os.path.dirname(compat_inspect.stack()[2][1])
    if command in __COMMANDS_OVERRIDE:
__COMMANDS_OVERRIDE[command].append((func, local_dir))
else:
__COMMANDS_OVERRIDE[command] = [(func, local_dir)]
def add_to_pre_registry(func, cmd_name):
global __PRE_HOOK_REGISTRY
if not cmd_name in __PRE_HOOK_REGISTRY:
__PRE_HOOK_REGISTRY[cmd_name] = [func]
else:
__PRE_HOOK_REGISTRY[cmd_name].append(func)
def add_to_post_registry(func, cmd_name):
global __POST_HOOK_REGISTRY
if not cmd_name in __POST_HOOK_REGISTRY:
__POST_HOOK_REGISTRY[cmd_name] = [func]
else:
        __POST_HOOK_REGISTRY[cmd_name].append(func)
def get_registry_categories():
global __HOOK_REGISTRY
return __HOOK_REGISTRY.keys()
def get_registry_category(category):
    global __HOOK_REGISTRY
    return __HOOK_REGISTRY[category]
def get_pre_hooks(cmd_name):
global __PRE_HOOK_REGISTRY
return __PRE_HOOK_REGISTRY.get(cmd_name, [])
def get_post_hooks(cmd_name):
global __POST_HOOK_REGISTRY
return __POST_HOOK_REGISTRY.get(cmd_name, [])
def get_command_override(cmd_name):
global __COMMANDS_OVERRIDE
return __COMMANDS_OVERRIDE.get(cmd_name, [])
def _make_hook_decorator(command_name, kind):
name = "%s_%s" % (kind, command_name)
help_bypass = False
def decorator(f):
local_dir = os.path.dirname(compat_inspect.stack()[1][1])
add_to_registry((f, local_dir, help_bypass), name)
if kind == "post":
add_to_post_registry((f, local_dir, help_bypass), command_name)
elif kind == "pre":
add_to_pre_registry((f, local_dir, help_bypass), command_name)
else:
raise ValueError("invalid hook kind %s" % kind)
return f
return decorator
post_configure = _make_hook_decorator("configure", "post")
pre_configure = _make_hook_decorator("configure", "pre")
post_build = _make_hook_decorator("build", "post")
pre_build = _make_hook_decorator("build", "pre")
post_sdist = _make_hook_decorator("sdist", "post")
pre_sdist = _make_hook_decorator("sdist", "pre")
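# Example (sketch): a project's hook file can then simply declare
#   @post_build
#   def my_hook(context):
#       ...
# and the function is recorded under "post_build" in the registries above.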
def override(f):
override_command(f.__name__, f)
def options(f):
__INIT_FUNCS["options"] = f
return lambda context: f(context)
def startup(f):
__INIT_FUNCS["startup"] = f
return lambda context: f(context)
def shutdown(f):
__INIT_FUNCS["shutdown"] = f
return lambda context: f(context)
def dummy_startup(ctx):
pass
def dummy_options(ctx):
pass
def dummy_shutdown():
pass
def create_hook_module(target):
import imp
safe_name = SAFE_MODULE_NAME.sub("_", target, len(target))
module_name = "bento_hook_%s" % safe_name
main_file = os.path.abspath(target)
module = imp.new_module(module_name)
module.__file__ = main_file
code = open(main_file).read()
sys.path.insert(0, os.path.dirname(main_file))
try:
exec(compile(code, main_file, 'exec'), module.__dict__)
sys.modules[module_name] = module
finally:
sys.path.pop(0)
module.root_path = main_file
if not "startup" in __INIT_FUNCS:
module.startup = dummy_startup
if not "options" in __INIT_FUNCS:
module.options = dummy_options
if not "shutdown" in __INIT_FUNCS:
module.shutdown = dummy_shutdown
return module
|
stanzheng/advent-of-code
|
2017/day5/main.py
|
Python
|
apache-2.0
| 5,444
| 0.004041
|
def main(i):
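    # Part 1 (AoC 2017 day 5): repeatedly jump by the offset at the current
    # index, incrementing each offset after it is used, until the index
    # escapes the list; print how many jumps that took.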
ins = i.split("\n")
arr = [int(i) for i in ins]
    index = 0
    iterations = 0
    while 0 <= index < len(arr):
        arr[index], index = arr[index] + 1, index + arr[index]
        iterations = iterations + 1
print(iterations)
def main2(i):
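    # Part 2: same walk, but offsets of three or more are decremented
    # instead of incremented after the jump.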
ins = i.split("\n")
arr = [int(i) for i in ins]
    index = 0
    iterations = 0
    while 0 <= index < len(arr):
        if arr[index] >= 3:
            arr[index], index = arr[index] - 1, index + arr[index]
        else:
            arr[index], index = arr[index] + 1, index + arr[index]
        iterations = iterations + 1
print(iterations)
i = """0
1
0
1
0
-1
0
1
2
2
-8
-7
-3
1
0
-2
-6
-7
-11
2
-11
0
-18
0
-18
-1
1
-16
-3
-28
-10
-6
-11
-6
-17
-20
-15
-31
-37
-34
-14
-35
-34
-17
-28
-20
-12
-41
-29
-8
-1
-50
-46
-26
-41
-33
-17
0
-28
-52
-38
-28
-29
-60
-23
-60
-55
-28
-43
-57
-66
-35
-48
-71
-25
-6
-27
-47
-77
-68
-21
2
-39
-82
-2
-59
-61
-67
-26
-11
0
-68
-85
-10
-62
-49
-28
-15
-34
-55
-92
-92
-37
-82
-49
-86
-25
-24
-81
-86
-6
-48
-79
-22
-30
-1
-63
-77
-64
-70
-86
-118
-36
-44
-50
-70
-76
-5
-72
-72
-84
-1
-104
-116
-18
-69
-78
-23
-99
-69
-32
-26
-4
-134
-22
-18
-70
-95
-13
-136
-73
-131
-24
-101
-136
-29
-132
-154
-108
-127
-48
-134
-122
-162
-2
-61
-9
-4
-126
-146
-161
-157
-116
-95
-83
-36
-86
-57
-42
-103
-73
1
0
-28
-156
-67
-178
-36
-169
-46
-16
-97
-86
-112
-186
-111
-69
-158
-37
-75
-109
-186
-16
-84
-73
-83
-139
-54
-89
-191
-126
-15
-158
-19
-116
-73
-13
-184
-121
-14
-116
-167
-174
-103
-66
-128
-156
-5
-174
-220
-213
-96
-139
-22
-102
-33
-118
-163
-184
-17
-76
-72
-96
-106
-203
-55
-181
-207
-40
-235
-139
-5
-127
-21
-155
-183
-51
-54
-38
-247
-218
-56
-34
-173
-241
-187
-38
-13
-172
-2
-235
-167
-191
-250
-150
-34
-151
-183
-119
-90
-21
-93
-275
-168
-160
-97
-100
-25
-273
-245
-44
-223
-201
-156
-12
-55
-189
-181
-10
-92
-152
-90
-217
-68
-81
-76
-86
-48
-287
-281
-63
-83
-66
-50
-49
-310
-254
-121
-294
-132
-53
-30
-223
-85
-297
-264
-58
-51
-294
-283
-3
0
-262
-33
-136
-14
-238
-6
-312
-17
-328
-299
-245
-266
-6
-330
-117
-172
-260
-224
-139
-156
-165
-13
-243
-173
-42
-67
-7
-148
-1
-105
-205
-223
-122
-82
-221
-317
-330
-240
-189
-12
-268
-243
-177
-120
-320
-127
-351
-178
-219
-351
-128
-28
-227
-188
-195
-205
-204
-283
-316
-276
-319
-312
-337
-318
-136
-33
-307
-397
-387
-303
-12
-347
-112
-171
-222
-358
-215
-71
-99
-108
-24
-291
-344
-97
-99
-6
-270
-327
-32
-387
-402
-13
-175
-243
-374
-422
-382
-152
-420
-266
-326
-37
-215
-357
-423
-16
-272
-357
-87
-184
-21
-351
-300
-219
-390
-12
-15
-78
-69
-35
-308
-303
-300
-265
-440
-19
-117
-87
-218
-163
-317
-42
-55
-185
-245
-196
-183
-327
-467
-102
-432
-162
-202
-39
-179
-301
-237
-299
-33
-198
-127
-138
-454
-46
-87
-362
-448
-382
-42
-358
-475
-350
-50
-380
-316
-380
-463
-108
-405
-139
-480
-30
-212
-308
-239
-223
-306
-81
-89
-172
-304
-87
-380
-394
-507
-392
-98
-403
-155
-13
-197
-66
-244
-401
-278
-391
-64
-460
-368
-178
-145
-440
-49
-369
-418
-332
-200
-294
-495
-104
-5
-261
-168
-392
-230
-154
-472
-404
-472
-307
-256
-169
-330
-500
-365
-146
-133
-84
-336
-405
-555
-74
-68
-354
-552
-108
-80
-406
-164
-119
-487
-151
-113
-244
-471
-80
-312
-495
-556
-76
-24
-546
-493
-340
-464
-328
-7
-474
-246
-237
-40
-199
-346
-330
-139
-284
-435
-83
-210
-423
-361
-56
-271
-140
-162
-232
-391
-42
-99
-590
2
-271
-101
-114
-117
-310
-502
-287
-319
-323
-362
-551
-439
-533
-183
-404
-401
-343
-36
-89
-454
-128
-611
-6
-619
-110
-389
-290
-270
-375
-283
-472
-65
-195
-129
-61
-548
-151
-74
-612
-156
-371
-42
-447
-565
-394
-550
-476
-592
-262
-96
-529
-395
-204
-491
-167
-186
-527
-508
-245
-455
-552
-672
-338
-269
-104
-240
-77
-303
-227
-453
-126
-294
-572
-8
-527
-361
-438
-457
-513
-560
-442
-649
-321
-123
-52
-166
-320
-301
-570
-684
-325
-515
-547
-52
-221
-488
-182
-618
-109
-497
-167
-288
-358
-334
-313
-288
-102
-409
-143
-204
-216
-681
-512
-245
-301
-35
-262
-239
-405
-682
-715
-438
-314
-179
-611
-667
-622
-511
-463
-370
-338
-434
-580
-637
-201
-213
-357
-443
-382
-315
-483
-399
-624
-318
-226
-652
-638
-743
-330
-647
-146
-138
-698
-511
-173
-663
-333
-564
-160
-239
-243
-91
-65
-468
-256
-197
-210
-575
-420
-715
-681
-454
-226
-226
-339
-473
-737
-62
-149
-351
-770
-313
-216
-491
-511
-269
-628
-391
-429
-110
-199
-409
-516
-7
-433
-405
-792
-685
-615
-287
-385
-627
-527
-426
-626
-164
-767
-794
-115
-483
-323
-371
-679
-772
-808
-2
-16
-459
-749
-569
-139
-7
-555
-161
-613
-230
-771
-825
-241
-579
-710
-73
-790
-653
-655
-394
-218
-711
-467
-774
-694
-664
-357
-29
-121
-643
-742
-388
-633
-440
-755
-581
-661
-653
-536
-596
-10
-796
-230
-813
-125
-540
-584
-389
-144
-346
-213
-444
-205
-712
-651
-670
-139
-60
-620
-49
-284
-212
-452
-520
-243
-356
-348
-442
-585
-202
-207
-222
-47
-49
-408
-571
-154
-695
-802
-524
-523
-617
-615
-571
-92
-344
-675
-613
-759
-29
-833
-662
-223
-46
-156
-373
-412
-848
-93
-695
-250
-810
-477
-150
-282
-789
-193
-443
-193
-159
-840
-755
-508
-404
-307
-80
-320
-14
-245
-746
-610
-855
-552
-323
-366
-45
-16
-335
-852
-46
-459
-461
-537
-547
-180
-842
-213
-447
-712
-633
-362
-953
-407
-47
0
-466
-107
-648
-528
-413
-828
-217
-484
-969
-121
-858
-208
-618
-384
-16
-91
-662
-348
-675
-63
-713
-966
-678
-293
-827
-445
-387
-212
-763
-847
-756
-299
-443
-80
-286
-954
-521
-394
-357
-861
-530
-649
-671
-437
-884
-606
-73
-452
-354
-729
-927
-248
-2
-738
-521
-440
-435
-291
-104
-402
-375
-875
-686
-812
-539
-934
-536
-924
-924
-365"""
# i = """0
# 3
# 0
# 1
# -3"""
main(i)
main2(i)
|
alpine9000/amiga_examples
|
tools/external/amitools/amitools/scan/FileScanner.py
|
Python
|
bsd-2-clause
| 3,414
| 0.01406
|
# scan a set of file
from __future__ import print_function
import os
import fnmatch
import tempfile
from ScanFile import ScanFile
class FileScanner:
def __init__(self, handler=None, ignore_filters=None, scanners=None,
error_handler=None, ram_bytes=10 * 1024 * 1024,
skip_handler=None):
"""the handler will be called with all the scanned files.
the optional ignore_filters contains a list of glob pattern to
ignore file names"""
self.handler = handler
self.error_handler = error_handler
self.skip_handler = skip_handler
self.ignore_filters = ignore_filters
self.scanners = scanners
self.ram_bytes = ram_bytes
def scan(self, path):
"""start scanning a path. either a file or directory"""
if os.path.isdir(path):
return self._scan_dir(path)
elif os.path.isfile(path):
return self._scan_file(path)
else:
return True
def scan_obj(self, scan_file, check_ignore=True):
"""pass a ScanFile to check"""
if check_ignore and self._is_ignored(scan_file.get_local_path()):
return False
# does a scanner match?
sf = scan_file
sc = self.scanners
if sc is not None:
for s in sc:
if s.can_handle(sf):
ok = s.handle(sf, self)
sf.close()
return ok
# no match call user's handler
    ok = self._call_handler(sf)
sf.close()
return ok
def _scan_dir(self, path):
if self._is_ignored(path):
return True
for root, dirs, files in os.walk(path):
for name in files:
if not self._scan_file(os.path.join(root,name)):
return False
for name in dirs:
if not self._scan_dir(os.path.join(root,name)):
return False
    return True
def _scan_file(self, path):
if self._is_ignored(path):
return True
# build a scan file
try:
size = os.path.getsize(path)
with open(path, "rb") as fobj:
sf = ScanFile(path, fobj, size, True, True)
return self.scan_obj(sf, False)
except IOError as e:
eh = self.error_handler
if eh is not None:
sf = ScanFile(path, None, 0)
return eh(sf, e)
else:
# ignore error
return True
def _is_ignored(self, path):
if self.ignore_filters is not None:
base = os.path.basename(path)
for f in self.ignore_filters:
if fnmatch.fnmatch(base, f):
return True
return False
def _call_handler(self, scan_file):
if self.handler is not None:
return self.handler(scan_file)
else:
return True
def _call_skip_handler(self, scan_file):
if self.skip_handler is not None:
return self.skip_handler(scan_file)
else:
return True
def promote_scan_file(self, scan_file, seekable=False, file_based=False):
    if not seekable and not file_based:
return scan_file
fb = file_based
if not fb and seekable and scan_file.size > self.ram_bytes:
fb = True
sf = scan_file.create_clone(seekable, fb)
scan_file.close()
return sf
# mini test
if __name__ == '__main__':
import sys
ifs = ['*.txt']
def handler(scan_file):
print(scan_file)
return True
def error_handler(scan_file, error):
print("FAILED:", scan_file, error)
raise error
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
knxd/pKNyX
|
tests/common/singleton.py
|
Python
|
gpl-3.0
| 617
| 0.006483
|
# -*- coding: utf-8 -*-
from pyknyx.common.singleton import *
import unittest
# Mute logger
import six
from pyknyx.services.logger import logging
logger = logging.getLogger(__name__)
logging.getLogger("pyknyx").setLevel(logging.ERROR)
@six.add_metaclass(Singleton)
class SingletonTest(object):
pass
class SingletonTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_constructor(self):
        s1 = SingletonTest()
s2 = SingletonTest()
self.assertIs(s1, s2)
|
gboone/wedding.harmsboone.org
|
rsvp/migrations/0020_auto__add_field_guest_hotel.py
|
Python
|
mit
| 8,280
| 0.005797
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field guests on 'Event'
db.delete_table(db.shorten_name(u'rsvp_event_guests'))
# Adding field 'Guest.hotel'
db.add_column(u'rsvp_guest', 'hotel',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Hotel'], null=True, blank=True),
keep_default=False)
# Adding M2M table for field events on 'Guest'
m2m_table_name = db.shorten_name(u'rsvp_guest_events')
db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('guest', models.ForeignKey(orm[u'rsvp.guest'], null=False)),
('event', models.ForeignKey(orm[u'rsvp.event'], null=False))
))
db.create_unique(m2m_table_name, ['guest_id', 'event_id'])
        # Removing M2M table for field guests on 'Table'
db.delete_table(db.shorten_name(u'rsvp_table_guests'))
# Removing M2M table for field guests on 'Hotel'
db.delete_table(db.shorten_name(u'rsvp_hotel_guests'))
# Removing M2M table for field guests on 'Room'
db.delete_table(db.shorten_name(u'rsvp_room_guests'))
def backwards(self, orm):
# Adding M2M table for field guests on 'Event'
m2m_table_name = db.shorten_name(u'rsvp_event_guests')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm[u'rsvp.event'], null=False)),
('guest', models.ForeignKey(orm[u'rsvp.guest'], null=False))
))
db.create_unique(m2m_table_name, ['event_id', 'guest_id'])
# Deleting field 'Guest.hotel'
db.delete_column(u'rsvp_guest', 'hotel_id')
# Removing M2M table for field events on 'Guest'
db.delete_table(db.shorten_name(u'rsvp_guest_events'))
# Adding M2M table for field guests on 'Table'
m2m_table_name = db.shorten_name(u'rsvp_table_guests')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('table', models.ForeignKey(orm[u'rsvp.table'], null=False)),
('guest', models.ForeignKey(orm[u'rsvp.guest'], null=False))
))
db.create_unique(m2m_table_name, ['table_id', 'guest_id'])
# Adding M2M table for field guests on 'Hotel'
m2m_table_name = db.shorten_name(u'rsvp_hotel_guests')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('hotel', models.ForeignKey(orm[u'rsvp.hotel'], null=False)),
('guest', models.ForeignKey(orm[u'rsvp.guest'], null=False))
))
db.create_unique(m2m_table_name, ['hotel_id', 'guest_id'])
# Adding M2M table for field guests on 'Room'
m2m_table_name = db.shorten_name(u'rsvp_room_guests')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('room', models.ForeignKey(orm[u'rsvp.room'], null=False)),
('guest', models.ForeignKey(orm[u'rsvp.guest'], null=False))
))
db.create_unique(m2m_table_name, ['room_id', 'guest_id'])
models = {
u'rsvp.event': {
'Meta': {'object_name': 'Event'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.guest': {
'Meta': {'ordering': "['-last_name', '-first_name']", 'object_name': 'Guest'},
'arriving': ('django.db.models.fields.DateField', [], {'default': "'2014-08-14'"}),
'attending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'departing': ('django.db.models.fields.DateField', [], {'default': "'2014-08-17'"}),
'display_as': ('django.db.models.fields.CharField', [], {'max_length': '91', 'null': 'True'}),
'events': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Event']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'hotel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Hotel']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'max_guests': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'nights': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "'None'", 'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primary_email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street_addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zip_code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
u'rsvp.hotel': {
'Meta': {'object_name': 'Hotel'},
'hotel_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'total_guest_count': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'rsvp.location': {
'Meta': {'object_name': 'Location'},
'distance': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.room': {
'Meta': {'object_name': 'Room'},
'hotel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Hotel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupancy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'room_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Roomtype']", 'null': 'True', 'blank': 'True'})
},
u'rsvp.roomtype': {
'Meta': {'object_name': 'Roomtype'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.table': {
'Meta': {'object_name': 'Table'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
|
catapult-project/catapult
|
telemetry/telemetry/core/memory_cache_http_server_unittest.py
|
Python
|
bsd-3-clause
| 3,837
| 0.001824
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
from telemetry.core import util
from telemetry.core import memory_cache_http_server
from telemetry.testing import tab_test_case
class RequestHandler(
memory_cache_http_server.MemoryCacheDynamicHTTPRequestHandler):
def ResponseFromHandler(self, path):
content = "Hello from handler"
return self.MakeResponse(content, "text/html", False)
class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
def setUp(self):
super(MemoryCacheHTTPServerTest, self).setUp()
    self._test_filename = 'bear.webm'
test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
self._test_file_size = os.stat(test_file).st_size
def testBasicHostingAndRangeRequests(self):
self.Navigate('blank.html')
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
x = x.strip()
# Test basic html hosting.
self.assertEqual(x, 'Hello world')
file_size = self._test_file_size
last_byte = file_size - 1
# Test byte range request: no end byte.
self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
# Test byte range request: greater than zero start byte.
self.CheckContentHeaders('100-', '100-%d' % last_byte, file_size - 100)
# Test byte range request: explicit byte range.
self.CheckContentHeaders('2-500', '2-500', '499')
# Test byte range request: no start byte.
self.CheckContentHeaders('-228', '%d-%d' % (file_size - 228, last_byte),
'228')
# Test byte range request: end byte less than start byte.
self.CheckContentHeaders('100-5', '100-%d' % last_byte, file_size - 100)
def CheckContentHeaders(self, content_range_request, content_range_response,
content_length_response):
self._tab.ExecuteJavaScript(
"""
var loaded = false;
var xmlhttp = new XMLHttpRequest();
xmlhttp.onload = function(e) {
loaded = true;
};
// Avoid cached content by appending unique URL param.
xmlhttp.open('GET', {{ url }} + "?t=" + Date.now(), true);
xmlhttp.setRequestHeader('Range', {{ range }});
xmlhttp.send();
""",
url=self.UrlOfUnittestFile(self._test_filename),
range='bytes=%s' % content_range_request)
self._tab.WaitForJavaScriptCondition('loaded', timeout=5)
content_range = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Range");')
content_range_response = 'bytes %s/%d' % (content_range_response,
self._test_file_size)
self.assertEqual(content_range, content_range_response)
content_length = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Length");')
self.assertEqual(content_length, str(content_length_response))
def testAbsoluteAndRelativePathsYieldSameURL(self):
test_file_rel_path = 'green_rect.html'
test_file_abs_path = os.path.abspath(
os.path.join(util.GetUnittestDataDir(), test_file_rel_path))
# It's necessary to bypass self.UrlOfUnittestFile since that
# concatenates the unittest directory on to the incoming path,
# causing the same code path to be taken in both cases.
self._platform.SetHTTPServerDirectories(util.GetUnittestDataDir())
self.assertEqual(self._platform.http_server.UrlOf(test_file_rel_path),
self._platform.http_server.UrlOf(test_file_abs_path))
def testDynamicHTTPServer(self):
self.Navigate('test.html', handler_class=RequestHandler)
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
self.assertEqual(x, 'Hello from handler')
|
luminize/libcanopen
|
python/examples/canopen-dump.py
|
Python
|
bsd-3-clause
| 649
| 0.001541
|
#!/usr/bin/python
# ------------------------------------------------------------------------------
# Copyright (C) 2012, Robert Johansson <rob@raditex.nu>, Raditex Control AB
# All rights reserved.
#
# This file is part of the rSCADA system.
#
# rSCADA
# http://www.rSCADA.se
# info@rscada.se
# ------------------------------------------------------------------------------
"""
Read CAN frame, parse into CANopen frame, and dump to STDOUT.
"""
from pycanopen import *
canopen = CANopen()
while True:
canopen_frame = canopen.read_frame()
if canopen_frame:
        print(canopen_frame)
else:
print("CANopen Frame parse error")
|
arun1729/road-network
|
rng/__init__.py
|
Python
|
mit
| 19
| 0.052632
|
def rng():
pass
|
mlhhu2017/identifyDigit
|
marc/mnist_util.py
|
Python
|
mit
| 5,121
| 0.004687
|
# coding: utf-8
from mnist import MNIST
import math
import numpy as np
import itertools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from random import randint
def get_np_array(path='data'):
"""
Get images and install converter:
1. install MNIST from the command line with 'pip install python-mnist'
2. download the data from http://yann.lecun.com/exdb/mnist/
3. extract the .gz files and rename '.' to '-' in the file names
converts mnist images to ndarrays
inputs:
path optional, path to the mnist files (default=='data')
outputs:
train 2d-array with shape (60000,784), training images
train_labels 1d-array with shape (60000,), training labels
test 2d-array with shape (10000,784), test images
test_labels 1d-array with shape (10000,), test labels
"""
mndata = MNIST(path)
train, train_labels = mndata.load_training()
test, test_labels = mndata.load_testing()
    return np.array(train), np.array(train_labels), np.array(test), np.array(test_labels)
def show_a_num(num):
"""
Plots a single number
inputs:
        num takes 1d-array with shape (784,) containing a single image
outputs:
img matplotlib image
"""
pixels = num.reshape((28,28))
img = plt.imshow(pixels, cmap='gray')
plt.axis("off")
return img
def show_nums(data, nrow=None, xsize=15, ysize=15):
"""
Plots multiple numbers in a "grid"
inputs:
data takes 2d-array with shape (n,784) containing images
nrow optional, number of rows in the output image (default == ceil(sqrt(n)))
xsize optional, specifies output image length in inches (default == 15)
ysize optional, specifies output image height in inches (default == 15)
outputs:
img matplotlib image
"""
n = len(data)
# check if at least one image
if n < 1:
raise ValueError("No image given!")
# if only 1 image print it
if len(data.shape) == 1:
return show_a_num(data)
# number of rows specified?
    if nrow is None:
# calculate default
ncol = math.ceil(math.sqrt(n))
nrow = math.ceil(n/ncol)
else:
# calculate number of columns
ncol = math.ceil(n/nrow)
# check if enough images
missing = nrow*ncol - n
if missing != 0:
# fill up with black images
zeros = np.zeros(missing*784)
zeros = zeros.reshape(missing,784)
data = np.vstack((data,zeros))
# reshape the data to the desired output
data = data.reshape((-1,28,28))
data = data.reshape((nrow,-1,28,28)).swapaxes(1,2)
data = data.reshape((nrow*28,-1))
plt.figure(figsize=(xsize,ysize))
img = plt.imshow(data, cmap='gray')
plt.axis("off")
return img
def get_one_num(data, labels, num):
"""
Creates 2d-array containing only images of a single number
inputs:
data takes 2d-array with shape (n,784) containing the images
labels takes 1d-array with shape (n,) containing the labels
num the number you want to filter
outputs:
arr 2d-array only containing a images of num
"""
return np.array([val for idx, val in enumerate(data) if labels[idx] == num])
def get_all_nums(data, labels):
"""
Creates a 1d-array containing 2d-arrays of images for every number
ex. arr[0] = 2d-array containing all images of number 0
inputs:
data takes 2d-array with shape (n,784) containing the images
labels takes 1d-array with shape (n,) containing the labels
outputs:
arr 1d-array containing 2d-arrays for every number
"""
return np.array([get_one_num(data, labels, i) for i in range(10)])
def plot_confusion_matrix(cm, classes, normalize=False,
title='Confusion matrix', cmap=plt.cm.Blues):
"""
Plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
inputs:
cm confusion matrix
classes name of classes
normalize optional, normalize matrix to show percentages (default == False)
title title of the plot (default == 'Confusion matrix')
cmap colormap (default == blue colormap)
outputs:
void
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
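
# Usage sketch (hypothetical; assumes scikit-learn for the matrix itself --
# sklearn.metrics.confusion_matrix is a standard way to produce the 'cm' input,
# and 'predictions' stands in for some classifier's output on the test set):
#   from sklearn.metrics import confusion_matrix
#   cm = confusion_matrix(test_labels, predictions)
#   plot_confusion_matrix(cm, classes=range(10), normalize=True)
#   plt.show()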
mekolat/manachat | external/construct/lib/container.py | Python | gpl-2.0 | 6,139 | 0.00619
"""
Various containers.
"""
def recursion_lock(retval, lock_name = "__recursion_lock__"):
def decorator(func):
def wrapper(self, *args, **kw):
if getattr(self, lock_name, False):
return retval
setattr(self, lock_name, True)
try:
return func(self, *args, **kw)
finally:
setattr(self, lock_name, False)
wrapper.__name__ = func.__name__
return wrapper
return decorator
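
# Note: recursion_lock guards the pretty-printers below against infinite
# recursion when a container (directly or indirectly) contains itself; on
# re-entry the decorated method short-circuits and returns `retval` instead.
# Minimal sketch of the effect (hypothetical):
#   c = Container()
#   c.self_ref = c
#   str(c)   # prints "self_ref = <...>" instead of recursing forever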
class Container(dict):
"""
A generic container of attributes.
Containers are the common way to express parsed data.
"""
__slots__ = ["__keys_order__"]
def __init__(self, **kw):
object.__setattr__(self, "__keys_order__", [])
for k, v in kw.items():
self[k] = v
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setitem__(self, key, val):
if key not in self:
self.__keys_order__.append(key)
dict.__setitem__(self, key, val)
def __delitem__(self, key):
dict.__delitem__(self, key)
self.__keys_order__.remove(key)
__delattr__ = __delitem__
__setattr__ = __setitem__
def clear(self):
dict.clear(self)
del self.__keys_order__[:]
    def pop(self, key, *default):
        val = dict.pop(self, key, *default)
        # only drop the key from the ordering if it was actually present;
        # dict.pop with a default does not raise for missing keys
        if key in self.__keys_order__:
            self.__keys_order__.remove(key)
        return val
def popitem(self):
k, v = dict.popitem(self)
self.__keys_order__.remove(k)
return k, v
def update(self, seq, **kw):
if hasattr(seq, "keys"):
for k in seq.keys():
self[k] = seq[k]
else:
for k, v in seq:
self[k] = v
dict.update(self, kw)
def copy(self):
inst = self.__class__()
inst.update(self.iteritems())
return inst
__update__ = update
__copy__ = copy
def __iter__(self):
return iter(self.__keys_order__)
iterkeys = __iter__
def itervalues(self):
return (self[k] for k in self.__keys_order__)
def iteritems(self):
return ((k, self[k]) for k in self.__keys_order__)
def keys(self):
return self.__keys_order__
def values(self):
return list(self.itervalues())
    def items(self):
        return list(self.iteritems())
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
    @recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k, v in self.iteritems():
if not k.startswith("_"):
text = [ind, k, " = "]
if hasattr(v, "__pretty_str__"):
text.append(v.__pretty_str__(nesting + 1, indentation))
else:
text.append(repr(v))
attrs.append("".join(text))
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__ + ":")
return "\n".join(attrs)
__str__ = __pretty_str__
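
# Usage sketch: Container remembers insertion order (even on Pythons whose
# plain dict does not), and attribute access aliases item access.
#   c = Container(a=1)
#   c.b = 2
#   c["c"] = 3
#   c.keys()   # ['a', 'b', 'c']
#   c.b        # 2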
class FlagsContainer(Container):
"""
A container providing pretty-printing for flags.
Only set flags are displayed.
"""
@recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k in self.keys():
v = self[k]
if not k.startswith("_") and v:
attrs.append(ind + k)
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__+ ":")
return "\n".join(attrs)
class ListContainer(list):
"""
A container for lists.
"""
__slots__ = ["__recursion_lock__"]
def __str__(self):
return self.__pretty_str__()
@recursion_lock("[...]")
def __pretty_str__(self, nesting = 1, indentation = " "):
if not self:
return "[]"
ind = indentation * nesting
lines = ["["]
for elem in self:
lines.append("\n")
lines.append(ind)
if hasattr(elem, "__pretty_str__"):
lines.append(elem.__pretty_str__(nesting + 1, indentation))
else:
lines.append(repr(elem))
lines.append("\n")
lines.append(indentation * (nesting - 1))
lines.append("]")
return "".join(lines)
class LazyContainer(object):
__slots__ = ["subcon", "stream", "pos", "context", "_value"]
def __init__(self, subcon, stream, pos, context):
self.subcon = subcon
self.stream = stream
self.pos = pos
self.context = context
self._value = NotImplemented
def __eq__(self, other):
try:
return self._value == other._value
except AttributeError:
return False
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.__pretty_str__()
def __pretty_str__(self, nesting = 1, indentation = " "):
if self._value is NotImplemented:
text = "<unread>"
elif hasattr(self._value, "__pretty_str__"):
text = self._value.__pretty_str__(nesting, indentation)
else:
text = str(self._value)
return "%s: %s" % (self.__class__.__name__, text)
def read(self):
self.stream.seek(self.pos)
return self.subcon._parse(self.stream, self.context)
def dispose(self):
self.subcon = None
self.stream = None
self.context = None
self.pos = None
def _get_value(self):
if self._value is NotImplemented:
self._value = self.read()
return self._value
value = property(_get_value)
has_value = property(lambda self: self._value is not NotImplemented)
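
# Usage sketch (hypothetical 'subcon', 'stream', 'pos', and 'context'; in
# construct these come from a lazy-parsing construct such as OnDemand):
#   lazy = LazyContainer(subcon, stream, pos, context)
#   lazy.has_value   # False until first access
#   lazy.value       # seeks the stream and parses on demand, then caches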
if __name__ == "__main__":
c = Container(x=5)
c.y = 8
c.z = 9
c.w = 10
c.foo = 5
    print(c)