| repo_name (string, len 5-100) | path (string, len 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, len 0-8.16k) | middle (string, len 3-512) | suffix (string, len 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
Kimanicodes/wananchi | app/loans/views.py | Python | mit | 4,187 | 0.000955
from flask import render_template, flash, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user
from . import loans
from forms import LoanApplicationForm, ApproveLoan, RepayLoan
from ..models import db
from ..models import Loan, User
from datetime import date
@loans.route('/new_loan', methods=['GET', 'POST'])
@login_required
def request_loan():
if not current_user.is_borrower:
abort(403)
elif not current_user.is_approved:
abort(404)
elif current_user.is_owing:
flash("You cannot request a new loan if you're still due!")
return redirect(url_for('loans.view'))
elif current_user.has_requested_loan:
flash("You cannot request a new loan if your last loan hasn't been approved!")
return redirect(url_for('loans.view'))
else:
form = LoanApplicationForm()
if form.validate_on_submit():
loan = Loan(loan_amt=form.loan_amt.data,
user=current_user._get_current_object())
if loan.loan_amt > loan.user.max_credit_amt:
flash('You can only borrow to a maximum of %s' %
loan.user.max_credit_amt)
return redirect(url_for('loans.request_loan'))
loan.is_requested = True
loan.user.has_requested_loan = True
db.session.add(loan)
db.session.commit()
flash('Success. Your Loan Application has been submitted. View it below.')
return redirect(url_for('loans.view'))
return render_template('loans/request_loan.html',
form=form, title="New Loan")
@loans.route('/view_history')
@login_required
def view():
if not current_user.is_borrower:
abort(403)
if not current_user.is_approved:
abort(404)
else:
loans = (Loan.query
.filter(Loan.user_id == current_user.id)
.order_by(Loan.requested_on.desc())
).all()
return render_template('loans/view.html',
loans=loans, title="My Loan Reqests")
@loans.route('/view_payment_history')
@login_required
def view_payment_history():
if not current_user.is_borrower:
abort(403)
if not current_user.is_approved:
abort(404)
else:
loans = (Loan.query
.filter(Loan.user_id == current_user.id)
.order_by(Loan.requested_on.desc())
).all()
return render_template('loans/view-payments.html',
loans=loans, title="My Loan Reqests")
'''View for when the user is creditworthy and can now borrow.'''
@loans.route('/repay/loan/<id>', methods=['GET', 'POST'])
@login_required
def repay_loan(id):
if not current_user.is_borrower:
abort(403)
loan = Loan.query.filter_by(id=id).first()
if loan is None:
abort(404)
if not loan.is_approved:
flash("You cannot repay a loan that hasn't been approved")
return redirect(url_for('loans.view'))
else:
form = RepayLoan()
if current_user.is_borrower and form.validate_on_submit():
loan.my_funds = form.my_funds.data
flash('Your payment has been received. Please wait while we confirm it.')
return redirect(url_for('loans.view'))
return render_template('loans/repay-loan.html', form=form, loan=loan)
@loans.route('/clear/loan/balance/<id>', methods=['GET', 'POST'])
@login_required
def clear_loan_balance(id):
if not current_user.is_borrower:
abort(403)
loan = Loan.query.filter_by(id=id).first()
if loan is None:
abort(404)
if not loan.is_approved:
flash("You cannot repay a loan that hasn't been approved")
return redirect(url_for('loans.view'))
form = RepayLoan()
if current_user.is_borrower and form.validate_on_submit():
loan.my_funds = form.my_funds.data
flash('Your payment has been received. Please wait while we confirm it.')
return redirect(url_for('loans.view'))
return render_template('loans/repay-loan.html', form=form, loan=loan)
cpennington/edx-platform | lms/djangoapps/discussion/django_comment_client/permissions.py | Python | agpl-3.0 | 10,094 | 0.004557
# pylint: disable=missing-docstring
"""
Module for checking permissions with the comment_client backend
"""
import logging
import six
from edx_django_utils.cache import DEFAULT_REQUEST_CACHE
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.teams.models import CourseTeam
from openedx.core.djangoapps.django_comment_common.comment_client import Thread
from openedx.core.djangoapps.django_comment_common.models import (
CourseDiscussionSettings,
all_permissions_for_user_in_course
)
from openedx.core.djangoapps.django_comment_common.utils import get_course_discussion_settings
from openedx.core.lib.cache_utils import request_cached
def has_permission(user, permission, course_id=None):
assert isinstance(course_id, (type(None), CourseKey))
request_cache_dict = DEFAULT_REQUEST_CACHE.data
cache_key = "django_comment_client.permissions.has_permission.all_permissions.{}.{}".format(
user.id, course_id
)
if cache_key in request_cache_dict:
all_permissions = request_cache_dict[cache_key]
else:
all_permissions = all_permissions_for_user_in_course(user, course_id)
request_cache_dict[cache_key] = all_permissions
return permission in all_permissions
CONDITIONS = ['is_open', 'is_author', 'is_question_author', 'is_team_member_if_applicable']
@request_cached()
def get_team(commentable_id):
""" Returns the team that the commentable_id belongs to if it exists. Returns None otherwise. """
try:
team = CourseTeam.objects.get(discussion_topic_id=commentable_id)
except CourseTeam.DoesNotExist:
team = None
return team
def _check_condition(user, condition, content):
""" Check whether or not the given condition applies for the given user and content. """
def check_open(_user, content):
""" Check whether the content is open. """
try:
return content and not content['closed']
except KeyError:
return False
def check_author(user, content):
""" Check if the given user is the author of the content. """
try:
return content and content['user_id'] == str(user.id)
except KeyError:
return False
def check_question_author(user, content):
""" Check if the given user is the author of the original question for both threads and comments. """
if not content:
return False
try:
request_cache_dict = DEFAULT_REQUEST_CACHE.data
if content["type"] == "thread":
cache_key = "django_comment_client.permissions._check_condition.check_question_author.{}.{}".format(
user.id, content['id']
)
if cache_key in request_cache_dict:
return request_cache_dict[cache_key]
else:
result = content["thread_type"] == "question" and content["user_id"] == str(user.id)
request_cache_dict[cache_key] = result
return result
else:
cache_key = "django_comment_client.permissions._check_condition.check_question_author.{}.{}".format(
user.id, content['thread_id']
)
if cache_key in request_cache_dict:
return request_cache_dict[cache_key]
else:
# make the now-unavoidable comments service query
thread = Thread(id=content['thread_id']).to_dict()
return check_question_author(user, thread)
except KeyError:
return False
def check_team_member(user, content):
"""
If the content has a commentable_id, verifies that either it is not associated with a team,
or if it is, that the user is a member of that team.
"""
if not content:
return False
try:
commentable_id = content['commentable_id']
request_cache_dict = DEFAULT_REQUEST_CACHE.data
cache_key = u"django_comment_client.check_team_member.{}.{}".format(user.id, commentable_id)
if cache_key in request_cache_dict:
return request_cache_dict[cache_key]
team = get_team(commentable_id)
if team is None:
passes_condition = True
else:
passes_condition = team.users.filter(id=user.id).exists()
request_cache_dict[cache_key] = passes_condition
except KeyError:
# We do not expect KeyError in production-- it usually indicates an improper test mock.
logging.warning("Did not find key commentable_id in content.")
passes_condition = False
return passes_condition
handlers = {
'is_open': check_open,
'is_author': check_author,
'is_question_author': check_question_author,
'is_team_member_if_applicable': check_team_member
}
return handlers[condition](user, content)
def _check_conditions_permissions(user, permissions, course_id, content, user_group_id=None, content_user_group=None):
"""
Accepts a list of permissions and proceeds if any of the permissions is valid.
Note that ["can_view", "can_edit"] will proceed if the user has either
"can_view" or "can_edit" permission. To use AND operator in between, wrap them in
a list.
"""
def test(user, per, operator="or"):
if isinstance(per, six.string_types):
if per in CONDITIONS:
return _check_condition(user, per, content)
if 'group_' in per:
# If a course does not have divided discussions
# or a course has divided discussions, but the current user's content group does not equal
# the content group of the commenter/poster,
# then the current user does not have group edit permissions.
division_scheme = get_course_discussion_settings(course_id).division_scheme
if (division_scheme is CourseDiscussionSettings.NONE
or user_group_id is None
or content_user_group is None
or user_group_id != content_user_group):
return False
return has_permission(user, per, course_id=course_id)
elif isinstance(per, list) and operator in ["and", "or"]:
results = [test(user, x, operator="and") for x in per]
if operator == "or":
return True in results
elif operator == "and":
return False not in results
return test(user, permissions, operator="or")
# Note: 'edit_content' is being used as a generic way of telling if someone is a privileged user
# (forum Moderator/Admin/TA), because there is a desire that team membership does not impact privileged users.
VIEW_PERMISSIONS = {
'update_thread': ['group_edit_content', 'edit_content', ['update_thread', 'is_open', 'is_author']],
'create_comment': ['group_edit_content', 'edit_content', ["create_comment", "is_open",
"is_team_member_if_applicable"]],
'delete_thread': ['group_delete_thread', 'delete_thread', ['update_thread', 'is_author']],
'update_comment': ['group_edit_content', 'edit_content', ['update_comment', 'is_open', 'is_author']],
'endorse_comment': ['endorse_comment', 'is_question_author'],
'openclose_thread': ['group_openclose_thread', 'openclose_thread'],
'create_sub_comment': ['group_edit_content', 'edit_content', ['create_sub_comment', 'is_open',
'is_team_member_if_applicable']],
'delete_comment': ['group_delete_comment', 'delete_comment', ['update_comment', 'is_open', 'is_author']],
'vote_for_comment': ['group_edit_content', 'edit_content', ['vote', 'is_open', 'is_team_member_if_applicable']],
'undo_vote_for_comment': ['group_edit_content', 'edit_content', ['unvote', 'is_open',
'is_team_member_if_app
tokyo-jesus/tamactiluya | tamactiluya/auth.py | Python | mit | 413 | 0.002421
# -*- coding: utf-8 -*-
from flask_login import LoginManager
from tamactiluya.models import User
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'user.login'
@login_manager.user_loader
def user_loader(uname) -> User or None:
"""
:param uname:
:return:
"""
try:
return User(uname)
except User.NotFound:
return None
abilian/abilian-sbe | src/abilian/sbe/apps/documents/webdav/__init__.py | Python | lgpl-2.1 | 121 | 0
"""WebDAV interface to the docum
|
ent reposit
|
ory."""
from __future__ import annotations
from .views import webdav # noqa
Valchris/tdoa | client/libraries/swamp/chat_example/chat_example/chat/routers.py | Python | mit | 771 | 0
from swampdragon import route_handler
from swampdragon.route_handler import BaseRouter
class ChatRouter(BaseRouter):
route_name = 'chat-route'
valid_verbs = ['chat', 'subscribe']
def get_subscription_channels(self, **kwargs):
return ['chatroom']
def chat(self, *args, **kwargs):
errors = {}
if 'name' not in kwargs or len(kwargs['name']) == 0:
errors['name'] = 'Specify a name'
if 'message' not in kwargs or len(kwargs['message']) == 0:
errors['message'] = 'Enter a chat message'
if errors:
self.send_error(errors)
else:
self.send({'status': 'ok'})
self.publish(self.get_subscription_channels(), kwargs)
route_handler.register(ChatRouter)
sergeyglazyrindev/amigrations | amigrations/adapters/exceptions.py | Python | mit | 36 | 0
class URIError(Exception):
pass
PMEAL/OpenPNM | openpnm/core/__init__.py | Python | mit | 1,947 | 0
r"""
Main classes of OpenPNM
=======================
This module contains the main classes from which all other major objects
(Network, Geometry, Physics, Phase, and Algorithm) derive.
The Base class
--------------
The ``Base`` class is a ``dict`` that has added methods for indexing the pores
and throats, applying labels, and managing the stored data. All OpenPNM
objects inherit from ``Base`` and so possess these methods.
The Subdomain class
-------------------
``Base`` objects (Networks, Phases, Algorithms) are assigned to all locations
in the domain. The ``Subdomain`` class is a direct descendent of ``Base``
which has the added ability to be assigned to a subset of the domain. Objects
that inherit from ``Subdomain`` are Geometry and Physics.
Boss objects refer to the full domain object they are associated with. For
Geometry objects this is the Network, and for Physics objects this is the
Phase that was specified during instantiation.
The associations between an object and its boss are tracked using labels in
the boss. So a Geometry object named ``geom1`` will put labels 'pore.geom1'
and 'throat.geom1' into the Network dictionary, with ``True`` values indicating
where ``geom1`` applies.
The ModelsMixin class
---------------------
`Mixins <https://en.wikipedia.org/wiki/Mixin>`_ are a useful feature of Python
that allow a few methods to be added to a class that needs them. In OpenPNM,
the ability to store and run 'pore-scale' models is not needed by some objects
(Network, Algorithms), but is essential to Geometry, Physics, and Phase
objects.
In addition to these methods, the ``ModelsMixin`` also adds a ``models``
attribute to each object. This is a dictionary that stores the pore-scale
models and their associated parameters. When ``regenerate_models`` is called
the function and all the given parameters are retrieved from this dictionary
and run.
"""
from ._models import *
from ._base import *
from ._mixins import *
from ._subdomain import *
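The docstring above describes how a Subdomain object registers itself via labels in its boss. A minimal sketch of that bookkeeping, assuming the OpenPNM 2.x API (the Cubic and GenericGeometry names are illustrative, not taken from this file):
import openpnm as op
pn = op.network.Cubic(shape=[3, 3, 3])
geom = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)
# The network now carries boolean label arrays named after the geometry,
# e.g. 'pore.<geom.name>', marking where that Subdomain applies.
print('pore.' + geom.name in pn.labels())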
polaris-gslb/polaris-core | polaris_common/topology.py | Python | bsd-3-clause | 2,452 | 0.001631
# -*- coding: utf-8 -*-
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
"""
args:
topology_config: dict
{
'region1': [
'10.1.1.0/24',
'10.1.10.0/24',
'172.16.1.0/24'
],
'region2': [
'192.168.1.0/24',
'10.2.0.0/16',
]
}
Region cannot be "_default"
returns:
topology_map: dict
{
ip_network('10.1.1.0/24'): 'region1',
ip_network('10.1.10.0/24'): 'region1',
ip_network('172.16.1.0/24'): 'region1',
ip_network('192.168.1.0/24'): 'region2',
ip_network('10.2.0.0/16'): 'region2',
}
raises:
ValueError: if a region value is "_default"
"""
topology_map = {}
for region in topology_config:
# "_default" cannot be used as a region name
if region == '_default':
raise ValueError('cannot use "_default" as a region name')
for net_str in topology_config[region]:
net = ipaddress.ip_network(net_str)
topology_map[net] = region
return topology_map
def get_region(ip_str, topology_map):
"""Return name of a region from the topology map for
the given IP address, if multiple networks contain the IP,
region of the most specific(longest prefix length) match is returned,
if multiple equal prefix length found the behavior of which
entry is returned is undefined.
args:
ip_str: string representing an IP address
returns:
string: region name
None: if no region has been found
raises:
ValueError: raised by ipaddress if ip_str isn't a valid IP address
"""
ip = ipaddress.ip_address(ip_str)
# find all the matching networks
matches = []
for net in topology_map:
if ip in net:
matches.append(net)
# if only a single match is found return it
if len(matches) == 1:
return topology_map[matches[0]]
# if more than 1 match is found, sort the matches
# by prefixlen, return the longest prefixlen entry
elif len(matches) > 1:
matches.sort(key=lambda net: net.prefixlen)
return topology_map[matches[-1]]
# no matches found
return None
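Illustrative usage of the two functions above (the regions and addresses are made up): build the map from a config dict shaped as documented, then look up an address; the longest matching prefix wins.
topology_map = config_to_map({
    'region1': ['10.1.1.0/24', '10.1.0.0/16'],
    'region2': ['192.168.1.0/24'],
})
print(get_region('10.1.1.25', topology_map))  # 'region1', via the /24 (longest prefix)
print(get_region('8.8.8.8', topology_map))    # None, no matching network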
VaSe7u/Supernutrient_0_5 | hash_check.py | Python | mit | 195 | 0
from flask_bcrypt import generate_password_hash
# Change the number of rounds (second argument) until it takes between
# 0.25 and 0.5 seconds to run.
generate_password_hash('password1', 8)
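A small sketch (not part of the original script) of one way to do that tuning: time a single hash at increasing cost factors and pick the first value that lands in the 0.25-0.5 s range.
import timeit
for rounds in range(8, 15):
    seconds = timeit.timeit(lambda: generate_password_hash('password1', rounds), number=1)
    print(rounds, round(seconds, 3))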
the-blue-alliance/the-blue-alliance | src/backend/common/consts/award_type.py | Python | mit | 5,567 | 0
import enum
from typing import Dict, Set
from backend.common.consts.event_type import EventType
@enum.unique
class AwardType(enum.IntEnum):
"""
An award type defines a logical type of award that an award falls into.
These types are the same across both years and competitions within a year.
In other words, an industrial design award from 2013casj and
2010cmp will be of award type AwardType.INDUSTRIAL_DESIGN.
An award type must be enumerated for every type of award ever awarded.
ONCE A TYPE IS ENUMERATED, IT MUST NOT BE CHANGED.
Award types don't care about what type of event (Regional, District,
District Championship, Championship Division, Championship Finals, etc.)
the award is from. In other words, RCA and CCA are of the same award type.
"""
CHAIRMANS = 0
WINNER = 1
FINALIST = 2
WOODIE_FLOWERS = 3
DEANS_LIST = 4
VOLUNTEER = 5
FOUNDERS = 6
BART_KAMEN_MEMORIAL = 7
MAKE_IT_LOUD = 8
ENGINEERING_INSPIRATION = 9
ROOKIE_ALL_STAR = 10
GRACIOUS_PROFESSIONALISM = 11
COOPERTITION = 12
JUDGES = 13
HIGHEST_ROOKIE_SEED = 14
ROOKIE_INSPIRATION = 15
INDUSTRIAL_DESIGN = 16
QUALITY = 17
SAFETY = 18
SPORTSMANSHIP = 19
CREATIVITY = 20
ENGINEERING_EXCELLENCE = 21
ENTREPRENEURSHIP = 22
EXCELLENCE_IN_DESIGN = 23
EXCELLENCE_IN_DESIGN_CAD = 24
EXCELLENCE_IN_DESIGN_ANIMATION = 25
DRIVING_TOMORROWS_TECHNOLOGY = 26
IMAGERY = 27
MEDIA_AND_TECHNOLOGY = 28
INNOVATION_IN_CONTROL = 29
SPIRIT = 30
WEBSITE = 31
VISUALIZATION = 32
AUTODESK_INVENTOR = 33
FUTURE_INNOVATOR = 34
RECOGNITION_OF_EXTRAORDINARY_SERVICE = 35
OUTSTANDING_CART = 36
WSU_AIM_HIGHER = 37
LEADERSHIP_IN_CONTROL = 38
NUM_1_SEED = 39
INCREDIBLE_PLAY = 40
PEOPLES_CHOICE_ANIMATION = 41
VISUALIZATION_RISING_STAR = 42
BEST_OFFENSIVE_ROUND = 43
BEST_PLAY_OF_THE_DAY = 44
FEATHERWEIGHT_IN_THE_FINALS = 45
MOST_PHOTOGENIC = 46
OUTSTANDING_DEFENSE = 47
POWER_TO_SIMPLIFY = 48
AGAINST_ALL_ODDS = 49
RISING_STAR = 50
CHAIRMANS_HONORABLE_MENTION = 51
CONTENT_COMMUNICATION_HONORABLE_MENTION = 52
TECHNICAL_EXECUTION_HONORABLE_MENTION = 53
REALIZATION = 54
REALIZATION_HONORABLE_MENTION = 55
DESIGN_YOUR_FUTURE = 56
DESIGN_YOUR_FUTURE_HONORABLE_MENTION = 57
SPECIAL_RECOGNITION_CHARACTER_ANIMATION = 58
HIGH_SCORE = 59
TEACHER_PIONEER = 60
BEST_CRAFTSMANSHIP = 61
BEST_DEFENSIVE_MATCH = 62
PLAY_OF_THE_DAY = 63
PROGRAMMING = 64
PROFESSIONALISM = 65
GOLDEN_CORNDOG = 66
MOST_IMPROVED_TEAM = 67
WILDCARD = 68
CHAIRMANS_FINALIST = 69
OTHER = 70
AUTONOMOUS = 71
INNOVATION_CHALLENGE_SEMI_FINALIST = 72
ROOKIE_GAME_CHANGER = 73
SKILLS_COMPETITION_WINNER = 74
SKILLS_COMPETITION_FINALIST = 75
ROOKIE_DESIGN = 76
ENGINEERING_DESIGN = 77
DESIGNERS = 78
CONCEPT = 79
AWARD_TYPES: Set[AwardType] = {a for a in AwardType}
BLUE_BANNER_AWARDS: Set[AwardType] = {
AwardType.CHAIRMANS,
AwardType.CHAIRMANS_FINALIST,
AwardType.WINNER,
AwardType.WOODIE_FLOWERS,
AwardType.SKILLS_COMPETITION_WINNER,
}
INDIVIDUAL_AWARDS: Set[AwardType] = {
AwardType.WOODIE_FLOWERS,
AwardType.DEANS_LIST,
AwardType.VOLUNTEER,
AwardType.FOUNDERS,
AwardType.BART_KAMEN_MEMORIAL,
AwardType.MAKE_IT_LOUD,
}
# awards not used in the district point model
NON_JUDGED_NON_TEAM_AWARDS: Set[AwardType] = {
AwardType.HIGHEST_ROOKIE_SEED,
AwardType.WOODIE_FLOWERS,
AwardType.DEANS_LIST,
AwardType.VOLUNTEER,
AwardType.WINNER,
AwardType.FINALIST,
AwardType.WILDCARD,
}
NORMALIZED_NAMES = {
AwardType.CHAIRMANS: {None: "Chairman's Award"},
AwardType.CHAIRMANS_FINALIST: {None: "Chairman's Award Finalist"},
AwardType.WINNER: {None: "Winner"},
AwardType.WOODIE_FLOWERS: {
None: "Woodie Flowers Finalist Award",
EventType.CMP_FINALS: "Woodie Flowers Award",
},
}
# Only searchable awards. Obscure & old awards not listed
SEARCHABLE: Dict[AwardType, str] = {
AwardType.CHAIRMANS: "Chairman's",
AwardType.CHAIRMANS_FINALIST: "Chairman's Finalist",
AwardType.ENGINEERING_INSPIRATION: "Engineering Inspiration",
AwardType.COOPERTITION: "Coopertition",
AwardType.CREATIVITY: "Creativity",
AwardType.ENGINEERING_EXCELLENCE: "Engineering Excellence",
AwardType.ENTREPRENEURSHIP: "Entrepreneurship",
AwardType.DEANS_LIST: "Dean's List",
AwardType.BART_KAMEN_MEMORIAL: "Bart Kamen Memorial",
AwardType.GRACIOUS_PROFESSIONALISM: "Gracious Professionalism",
AwardType.HIGHEST_ROOKIE_SEED: "Highest Rookie Seed",
AwardType.IMAGERY: "Imagery",
AwardType.INDUSTRIAL_DESIGN: "Industrial Design",
AwardType.SAFETY: "Safety",
AwardType.INNOVATION_IN_CONTROL: "Innovation in Control",
AwardType.QUALITY: "Quality",
AwardType.ROOKIE_ALL_STAR: "Rookie All Star",
AwardType.ROOKIE_INSPIRATION: "Rookie Inspiration",
AwardType.SPIRIT: "Spirit",
AwardType.VOLUNTEER: "Volunteer",
AwardType.WOODIE_FLOWERS: "Woodie Flowers",
AwardType.JUDGES: "Judges'",
}
# Prioritized sort order for certain awards
SORT_ORDER: Dict[AwardType, int] = {
AwardType.CHAIRMANS: 0,
AwardType.FOUNDERS: 1,
AwardType.ENGINEERING_INSPIRATION: 2,
AwardType.ROOKIE_ALL_STAR: 3,
AwardType.WOODIE_FLOWERS: 4,
AwardType.VOLUNTEER: 5,
AwardType.DEANS_LIST: 6,
AwardType.WINNER: 7,
AwardType.FINALIST: 8,
}
crypotex/taas | taas/reservation/handlers.py | Python | gpl-2.0 | 320 | 0.009375
from taas.reservation.models import Payment
def delete_payment_before_last_reservation_delete(sender, instance=None, **kwargs):
payment = instance.payment
if payment is None:
return
elif payment.reservation_set.count() == 1 and payment.type == Payment.STAGED:
instance.payment.delete()
karim-omran/openerp-addons | scientific_institutions/wizard/__init__.py | Python | agpl-3.0 | 23 | 0
import sci_inst_byname
Karel-van-de-Plassche/bokeh | bokeh/application/handlers/code.py | Python | bsd-3-clause | 6,686 | 0.004487
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by compiling
and executing Python source code.
This Handler is used by the Bokeh server command line tool to build
applications that run off scripts and notebooks.
.. code-block:: python
def make_doc(doc):
# do work to modify the document, add plots, widgets, etc.
return doc
app = Application(FunctionHandler(make_doc))
server = Server({'/bkapp': app}, io_loop=IOLoop.current())
server.start()
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
# External imports
# Bokeh imports
from ...io.doc import set_curdoc, curdoc
from .code_runner import CodeRunner
from .handler import Handler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class CodeHandler(Handler):
''' Run source code which modifies a Document
'''
# These functions, if present in the supplied code, will be monkey patched
# to be no-ops, with a warning.
_io_functions = ['output_notebook', 'output_file', 'show', 'save', 'reset_output']
def __init__(self, *args, **kwargs):
'''
Args:
source (str) : python source code
filename (str) : a filename to use in any debugging or error output
argv (list[str], optional) : a list of string arguments to make
available as ``sys.argv`` when the code executes
'''
super(CodeHandler, self).__init__(*args, **kwargs)
if 'source' not in kwargs:
raise ValueError('Must pass source to CodeHandler')
source = kwargs['source']
if 'filename' not in kwargs:
raise ValueError('Must pass a filename to CodeHandler')
filename = kwargs['filename']
argv = kwargs.get('argv', [])
self._runner = CodeRunner(source, filename, argv)
self._loggers = {}
for f in CodeHandler._io_functions:
self._loggers[f] = self._make_io_logger(f)
# Properties --------------------------------------------------------------
@property
def error(self):
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self):
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self):
''' ``True`` if the handler failed to modify the doc
'''
return self._runner.failed
@property
def safe_to_fork(self):
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if the code has already been executed.
'''
return not self._runner.ran
# Public methods ----------------------------------------------------------
def modify_document(self, doc):
'''
'''
if self.failed:
return
module = self._runner.new_module()
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
set_curdoc(doc)
old_io = self._monkeypatch_io()
try:
def post_check():
newdoc = curdoc()
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
self._runner.run(module, post_check)
finally:
self._unmonkeypatch_io(old_io)
set_curdoc(old_doc)
def url_path(self):
''' The last path component for the basename of the configured filename.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
# Private methods ---------------------------------------------------------
# subclasses must define self._logger_text
def _make_io_logger(self, name):
def logger(*args, **kwargs):
log.info(self._logger_text, self._runner.path, name)
return logger
# monkeypatching is a little ugly, but in this case there's no reason any legitimate
# code should be calling these functions, and we're only making a best effort to
# warn people so no big deal if we fail.
def _monkeypatch_io(self):
import bokeh.io as io
old = {}
for f in CodeHandler._io_functions:
old[f] = getattr(io, f)
setattr(io, f, self._loggers[f])
return old
def _unmonkeypatch_io(self, old):
import bokeh.io as io
for f in old:
setattr(io, f, old[f])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
soneoed/naowalkoptimiser | server/MCLLocalisation.py | Python | gpl-3.0 | 26,832 | 0.011777
""" An SIR Particle Filter based localisation system for tracking a robot with ambiguous bearing
Jason Kulk
"""
from NAO import NAO
import numpy, time
class Localisation:
X = 0
Y = 1
THETA = 2
XDOT = 3
YDOT = 4
THETADOT = 5
STATE_LENGTH = 6
VEL_PAST_LENGTH = 13
def __init__(self, M = 1000):
""" """
Localisation.NUM_PARTICLES = M
self.reset = True
self.time = time.time()
self.previoustime = self.time
self.control = numpy.zeros(3) # the current control
self.previouscontrol = self.control # the previous control
self.measurement = numpy.zeros(Localisation.STATE_LENGTH) # the current measurement of the state
self.previousmeasurement = self.measurement # the previous measurement of the state
self.previousmeasurementsigma = numpy.zeros(Localisation.STATE_LENGTH)
self.States = numpy.zeros((Localisation.NUM_PARTICLES, Localisation.STATE_LENGTH)) # the (states) particles
self.PreviousStates = self.States # the previous state of each particle (used for derivative calculations)
self.Weights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES) # the weights of each particle
self.GUIWeights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES) # the weights of each particle before resampling
self.State = self.States[0]
# Variables for the control model:
self.accelerationduration = numpy.array([1.5, 1.5, 0.5]) # the duration an acceleration is applied (s)
self.accelerationmagnitudes = numpy.array([15, 15, 0.7])/self.accelerationduration # the magnitude of the accelerations [forward, sideward, turn] (cm/s/s, rad/s)
self.accelerations = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the current acceleration (cm/s/s) for each particle
self.accelendtimes = numpy.zeros((Localisation.NUM_PARTICLES, 3)) # the times the accelerations will be set to zero given no change in control (s)
self.startedcontrol = False # a small hack that will prevent resampling until the control has started
# Variables for additional velocity filtering!
self.PastVX = list(numpy.zeros(Localisation.VEL_PAST_LENGTH))
self.PastVY = list(numpy.zeros(Localisation.VEL_PAST_LENGTH))
def update(self, control, nao):
""" """
self.time = time.time()
self.control = control
self.measurement = self.__naoToState(nao)
self.measurementsigma = self.__naoToSigma(nao)
if self.reset:
self.__initParticles()
self.reset = False
else:
self.predict()
self.updateWeights()
self.estimateState()
self.resample()
self.previoustime = self.time
self.previousmeasurement = self.measurement
self.previousmeasurementsigma = self.measurementsigma
self.PreviousStates = numpy.copy(self.States)
def predict(self):
""" Updates each of the particles based on system and control model """
self.modelSystem()
self.modelControl()
def updateWeights(self):
""" """
if not self.startedcontrol: ## this hack prevents particles from disappearing before the robot starts moving
return
# calculate variances for the measurement
# the variance in the velocity is the sum of the current and previous variance in the position measurements
self.measurementsigma[Localisation.XDOT] = max(4.0, (self.measurementsigma[Localisation.X] + self.previousmeasurementsigma[Localisation.X]))
self.measurementsigma[Localisation.YDOT] = max(4.0, (self.measurementsigma[Localisation.Y] + self.previousmeasurementsigma[Localisation.Y]))
self.measurementsigma[Localisation.THETADOT] = max(1.0, 1.5*(self.measurementsigma[Localisation.THETA] + self.previousmeasurementsigma[Localisation.THETA]))
# calculate the weights based on a measurement model
self.Weights *= self._gauss(self.States[:,Localisation.X] - self.measurement[Localisation.X], self.measurementsigma[Localisation.X]) # 1.73
self.Weights *= self._gauss(self.States[:,Localisation.Y] - self.measurement[Localisation.Y], self.measurementsigma[Localisation.Y]) # 1.73
# I need a little outlier rejection here:
anglediff = numpy.fabs(self.measurement[Localisation.THETA] - self.previousmeasurement[Localisation.THETA])
if anglediff > 5*numpy.pi/12 and anglediff < 7*numpy.pi/12:
self.measurementsigma[Localisation.THETA] += 1.0
self.measurementsigma[Localisation.THETADOT] += 15
elif anglediff > numpy.pi/3 and anglediff < 2*numpy.pi/3:
self.measurementsigma[Localisation.THETA] += 0.4
self.measurementsigma[Localisation.THETADOT] += 5
self.Weights *= self._gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] + numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] - numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) + self._gauss(self.States[:,Localisation.THETA] + 2*numpy.pi - self.measurement[Localisation.THETA], self.measurementsigma[Localisation.THETA]) # 0.02 + 0.07
self.Weights *= self._gauss(self.States[:,Localisation.THETADOT] - self.measurement[Localisation.THETADOT], self.measurementsigma[Localisation.THETADOT])
self.Weights *= self._gauss(self.States[:,Localisation.XDOT] - self.measurement[Localisation.XDOT], self.measurementsigma[Localisation.XDOT]) # 2.95 + 1.5
self.Weights *= self._gauss(self.States[:,Localisation.YDOT] - self.measurement[Localisation.YDOT], self.measurementsigma[Localisation.YDOT])
controlvector = self.__controlToVelocityVector()
if controlvector != None:
diffs = numpy.arctan2(self.States[:,Localisation.YDOT], self.States[:,Localisation.XDOT]) - self.States[:,Localisation.THETA]
diffs = numpy.arctan2(numpy.sin(diffs), numpy.cos(diffs)) ## I need to normalise the diffs
self.Weights *= self._gauss(diffs - self.__controlToVelocityVector(), 0.707)
# normalise the weights so that their sum is one
sum = numpy.sum(self.Weights)
if sum != 0:
self.Weights /= sum
else:
print "Oh Noes: All of the weights are zero!"
print "Measurements:", self.measurement, self.previousmeasurement
print "State:", self.State
tempweights = (1.0/Localisation.NUM_PARTICLES)*numpy.ones(Localisation.NUM_PARTICLES)
tempweights *= self._gauss(self.States[:,Localisation.X] - self.measurement[Localisation.X], 3.73) # 1.73
print "X:", numpy.average(tempweights)
tempweights *= self._gauss(self.States[:,Localisation.Y] - self.measurement[Localisation.Y], 3.73) # 1.73
print "Y:", numpy.average(tempweights)
anglediff = numpy.fabs(self.measurement[Localisation.THETA] - self.previousmeasurement[Localisation.THETA])
if anglediff < 5*numpy.pi/12 or anglediff > 7*numpy.pi/12:
tempweights *= self._gauss(self.States[:,Localisation.THETA] - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.States[:,Localisation.THETA] + numpy.pi - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.States[:,Localisation.THETA] - numpy.pi - self.measurement[Localisation.THETA], 0.09) + self._gauss(self.Sta
rlee287/pyautoupdate | test/pytest_makevers.py | Python | lgpl-2.1 | 1,008 | 0.001984
import os
import shutil
import pytest
from ..pyautoupdate.launcher import Launcher
@pytest.fixture(scope='function')
def fixture_update_dir(request):
"""Fixture that creates and tears down version.txt and log files"""
def create_update_dir(version="0.0.1"):
def teardown():
if os.path.isfile(Launcher.version_check_log):
os.remove(Launcher.version_check_log)
if os.path.isfile(Launcher.version_doc):
os.remove(Launcher.version_doc)
request.addfinalizer(teardown)
with open(Launcher.version_doc, mode='w') as version_file:
version_file.write(version)
return fixture_update_dir
return create_update_dir
@pytest.fixture(scope='function')
def create_update_dir(request):
"""Fixture that tears down downloads directory"""
def teardown():
shutil.rmtree(Launcher.updatedir)
os.remove(Launcher.version_check_log)
request.addfinalizer(teardown)
return create_update_dir
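A hypothetical test using the first fixture above (not part of this file), to show the intended flow: pytest injects fixture_update_dir, calling it writes Launcher.version_doc, and the registered finalizer removes the file afterwards.
def test_version_file_written(fixture_update_dir):
    fixture_update_dir("1.2.3")
    with open(Launcher.version_doc) as version_file:
        assert version_file.read() == "1.2.3"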
wei2912/bce-simulation | utils/coin_var.py | Python | mit | 4,752 | 0.004419
"""
Simulation of a variation of Buffon's Coin Experiment.
The program checks if the coin will balance in addition
to touching one of the lines of the grid.
"""
import random
import math
DEFAULT_TRIALS = 100000
def __convex__hull(points):
"""Computes the convex hull of a set of 2D points.
Input: an iterable sequence of (x, y) pairs representing the points.
Output: a list of vertices of the convex hull in counter-clockwise order,
starting from the vertex with the lexicographically smallest coordinates.
Implements Andrew's monotone chain algorithm. O(n log n) complexity.
Taken from https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
"""
# Sort the points lexicographically (tuples are compared lexicographically).
# Remove duplicates to detect the case we have just one unique point.
points = sorted(set(points))
# Boring case: no points or a single point, possibly repeated multiple times.
if len(points) <= 1:
return points
# 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.
# Returns a positive value, if OAB makes a counter-clockwise turn,
# negative for clockwise turn, and zero if the points are collinear.
def cross(o, a, b):
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
# Build lower hull
lower = []
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
lower.pop()
lower.append(p)
# Build upper hull
upper = []
for p in reversed(points):
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
upper.pop()
upper.append(p)
# Concatenation of the lower and upper hulls gives the convex hull.
# Last point of each list is omitted because it is repeated at the beginning of the other list.
return lower[:-1] + upper[:-1]
def __transform_center(gap_width, x, y):
"""
Depending on the region which the coin lands,
this function transforms the coin onto a
Cartesian plane where the axes are the closest
corner and returns the coordinates of the
center of the circle.
"""
split = gap_width/2
if x > split:
x -= gap_width
if y > split:
y -= gap_width
return (x, y)
def __get_pivots(diameter, x, y):
"""
Get the x-intercepts and y-intercepts of
the circle and return a list of pivots which
the coin lies on.
"""
pivots = []
radius = diameter / 2
sqval = radius**2 - y**2
if sqval > 0: # no imaginary numbers!
sqrt = sqval**(0.5)
pivots.append((x + sqrt, 0))
pivots.append((x - sqrt, 0))
elif sqval == 0: # tangent
pivots.append((x, 0))
sqval = radius**2 - x**2
if sqval > 0:
sqrt = sqval**(0.5)
pivots.append((0, y + sqrt))
pivots.append((0, y - sqrt))
elif sqval == 0:
pivots.append((0, y))
return pivots
def run_trials(diameter=1.0, gap_width=1.0, trials=DEFAULT_TRIALS):
"""
Run the simulation a specified number of times.
"""
hits = 0
for _ in xrange(trials):
x = random.uniform(0.0, gap_width)
y = random.uniform(0.0, gap_width)
x, y = __transform_center(gap_width, x, y)
# if the center of gravity actually lies on the edge
# the coin will balance
if x == 0 or y == 0:
hits += 1
continue
pivots = __get_pivots(diameter, x, y)
# if it is a tangent to either of the axes
# it won't balance
if len(pivots) < 4:
continue
# convex hull of pivots and center
# check if the center of gravity is a point in the shape
# if it is, the coin does not balance.
# otherwise, the coin does.
pivots.append((x, y))
hull = __convex__hull(pivots)
# if center is in the convex hull
# whee we have a hit
if (x, y) not in hull:
hits += 1
return hits
def predict_prob(diameter=1.0, gap_width=1.0):
"""
For the variables passed into the simulation,
predict the probability that the coin will balance on the grid.
diameter and gap_width can be scalars or arrays.
"""
R = diameter / 2
D = gap_width
if D >= 2 * R:
return (
math.pi *
(R / D) ** 2
)
elif D > R * math.sqrt(2):
return (
math.sqrt(
4 *
(R / D) ** 2
- 1
) +
(R / D) ** 2 *
(math.pi - 4 * math.acos(D / (2*R)))
)
else:
return 1
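An illustrative comparison (not in the original module) of the simulated hit ratio against the closed-form prediction; it runs under Python 2, matching the module's use of xrange.
trials = DEFAULT_TRIALS
hits = run_trials(diameter=1.0, gap_width=1.5, trials=trials)
print hits / float(trials), predict_prob(diameter=1.0, gap_width=1.5)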
antoinecarme/sklearn_explain | tests/skl_datasets_reg/RandomReg_500/skl_dataset_RandomReg_500_SVR_poly_8_code_gen.py | Python | bsd-3-clause | 149 | 0.006711
from sklearn_explain.tests.skl_datasets_reg import skl_datasets_test as skltest
skltest.test_reg_dataset_and_model("RandomReg_500", "SVR_poly_8")
antivirtel/Flexget | flexget/plugins/metainfo/uoccin_lookup.py | Python | mit | 3,924 | 0.004077
from __future__ import unicode_literals, division, absolute_import
import os
from flexget import plugin
from flexget.event import event
from flexget.utils import json
def load_uoccin_data(path):
udata = {}
ufile = os.path.join(path, 'uoccin.json')
if os.path.exists(ufile):
try:
with open(ufile, 'r') as f:
udata = json.load(f)
except Exception as err:
raise plugin.PluginError('error reading %s: %s' % (ufile, err))
udata.setdefault('movies', {})
udata.setdefault('series', {})
return udata
class UoccinLookup(object):
schema = { 'type': 'string', 'format': 'path' }
# Run after metainfo_series / thetvdb_lookup / imdb_lookup
@plugin.priority(100)
def on_task_metainfo(self, task, config):
"""Retrieves all the information found in the uoccin.json file for the entries.
Example::
uoccin_lookup: /path/to/gdrive/uoccin
Resulting fields on entries:
on series (requires tvdb_id):
- uoccin_watchlist (true|false)
- uoccin_rating (integer)
- uoccin_tags (list)
on episodes (requires tvdb_id, series_season and series_episode):
- uoccin_collected (true|false)
- uoccin_watched (true|false)
- uoccin_subtitles (list of language codes)
(plus the 3 series specific fields)
on movies (requires imdb_id):
- uoccin_watchlist (true|false)
- uoccin_collected (true|false)
- uoccin_watched (true|false)
- uoccin_rating (integer)
- uoccin_tags (list)
- uoccin_subtitles (list of language codes)
"""
if not task.entries:
return
udata = load_uoccin_data(config)
movies = udata['movies']
series = udata['series']
for entry in task.entries:
entry['uoccin_watchlist'] = False
entry['uoccin_collected'] = False
entry['uoccin_watched'] = False
entry['uoccin_rating'] = None
entry['uoccin_tags'] = []
entry['uoccin_subtitles'] = []
if 'tvdb_id' in entry:
ser = series.get(str(entry['tvdb_id']))
if ser is None:
continue
entry['uoccin_watchlist'] = ser.get('watchlist', False)
entry['uoccin_rating'] = ser.get('rating')
entry['uoccin_tags'] = ser.get('tags', [])
if all(field in entry for field in ['series_season', 'series_episode']):
season = str(entry['series_season'])
episode = entry['series_episode']
edata = ser.get('collected', {}).get(season, {}).get(str(episode))
entry['uoccin_collected'] = isinstance(edata, list)
entry['uoccin_subtitles'] = edata if entry['uoccin_collected'] else []
entry['uoccin_watched'] = episode in ser.get('watched', {}).get(season, [])
elif 'imdb_id' in entry:
try:
mov = movies.get(entry['imdb_id'])
except plugin.PluginError as e:
self.log.trace('entry %s imdb failed (%s)' % (entry['imdb_id'], e.value))
continue
if mov is None:
continue
entry['uoccin_watchlist'] = mov.get('watchlist', False)
entry['uoccin_collected'] = mov.get('collected', False)
entry['uoccin_watched'] = mov.get('watched', False)
entry['uoccin_rating'] = mov.get('rating')
entry['uoccin_tags'] = mov.get('tags', [])
entry['uoccin_subtitles'] = mov.get('subtitles', [])
@event('plugin.register')
def register_plugin():
plugin.register(UoccinLookup, 'uoccin_lookup', api_ver=2)
qvazzler/Flexget | tests/test_rtorrent.py | Python | mit | 13,663 | 0.001244
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.xmlrpc import client as xmlrpc_client
import os
import mock
from flexget.plugins.plugin_rtorrent import RTorrent
torrent_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'private.torrent')
torrent_url = 'file:///%s' % torrent_file
torrent_info_hash = '09977FE761B8D293AD8A929CCAF2E9322D525A6C'
with open(torrent_file, 'rb') as tor_file:
torrent_raw = tor_file.read()
def compare_binary(obj1, obj2):
# Used to compare xmlrpclib.binary objects within a mocked call
if not type(obj1) == type(obj2):
return False
if obj1.data != obj2.data:
return False
return True
class Matcher(object):
def __init__(self, compare, some_obj):
self.compare = compare
self.some_obj = some_obj
def __eq__(self, other):
return self.compare(self.some_obj, other)
@mock.patch('flexget.plugins.plugin_rtorrent.xmlrpc_client.ServerProxy')
class TestRTorrentClient(object):
def test_version(self, mocked_proxy):
mocked_client = mocked_proxy()
mocked_client.system.client_version.return_value = '0.9.4'
client = RTorrent('http://localhost/RPC2')
assert client.version == [0, 9, 4]
assert mocked_client.system.client_version.called
def test_load(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.execute.throw.return_value = 0
mocked_proxy.load.raw_start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.load(
torrent_raw,
fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'testing'},
start=True,
mkdir=True,
)
assert resp == 0
# Ensure mkdir was called
mocked_proxy.execute.throw.assert_called_with('', 'mkdir', '-p', '/data/downloads')
# Ensure load was called
assert mocked_proxy.load.raw_start.called
match_binary = Matcher(compare_binary, xmlrpc_client.Binary(torrent_raw))
called_args = mocked_proxy.load.raw_start.call_args_list[0][0]
assert len(called_args) == 5
assert '' == called_args[0]
assert match_binary in called_args
fields = [p for p in called_args[2:]]
assert len(fields) == 3
assert 'd.directory.set=\\/data\\/downloads' in fields
assert 'd.custom1.set=testing' in fields
assert 'd.priority.set=3' in fields
def test_torrent(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['/data/downloads'], ['private.torrent'], [torrent_info_hash], ['test_custom1'], [123456]
]
client = RTorrent('http://localhost/RPC2')
torrent = client.torrent(torrent_info_hash, fields=['custom1', 'down_rate']) # Required fields should be added
assert isinstance(torrent, dict)
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('hash') == torrent_info_hash
assert torrent.get('custom1') == 'test_custom1'
assert torrent.get('name') == 'private.torrent'
assert torrent.get('down_rate') == 123456
assert mocked_proxy.system.multicall.called_with(([
{'params': (torrent_info_hash,), 'methodName': 'd.base_path'},
{'params': (torrent_info_hash,), 'methodName': 'd.name'},
{'params': (torrent_info_hash,), 'methodName': 'd.hash'},
{'params': (torrent_info_hash,), 'methodName': 'd.custom1'},
{'params': (torrent_info_hash,), 'methodName': 'd.down.rate'},
]))
def test_torrents(self, mocked_proxy):
mocked_proxy = mocked_proxy()
hash1 = '09977FE761AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
hash2 = '09977FE761BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
mocked_proxy.d.multicall.return_value = (
['/data/downloads', 'private.torrent', hash1, 'test_custom1'],
['/data/downloads', 'private.torrent', hash2, 'test_custom2'],
)
client = RTorrent('http://localhost/RPC2')
torrents = client.torrents(fields=['custom1']) # Required fields should be added
assert isinstance(torrents, list)
for torrent in torrents:
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('name') == 'private.torrent'
if torrent.get('hash') == hash1:
assert torrent.get('custom1') == 'test_custom1'
elif torrent.get('hash') == hash2:
assert torrent.get('custom1') == 'test_custom2'
else:
assert False, 'Invalid hash returned'
assert mocked_proxy.system.multicall.called_with((
['main', 'd.directory_base=', 'd.name=', 'd.hash=', u'd.custom1='],
))
def test_update(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [[0]]
client = RTorrent('http://localhost/RPC2')
update_fields = {
'custom1': 'test_custom1',
'directory_base': '/data/downloads',
'priority': 3,
}
resp = client.update(torrent_info_hash, fields=update_fields)
assert resp == 0
assert mocked_proxy.system.multicall.called_with(([
{'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.directory_base'},
{'params': (torrent_info_hash, 'test_custom1'), 'methodName': 'd.custom1'},
{'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.custom1'}
]))
def test_delete(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.erase.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.delete(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.erase.called_with((torrent_info_hash,))
def test_move(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['private.torrent'], [torrent_info_hash], ['/data/downloads'],
]
mocked_proxy.move.return_value = 0
mocked_proxy.d.directory.set.return_value = 0
mocked_proxy.execute.throw.return_value = 0
client = RTorrent('http://localhost/RPC2')
client.move(torrent_info_hash, '/new/folder')
mocked_proxy.execute.throw.assert_has_calls([
mock.call('', 'mkdir', '-p', '/new/folder'),
mock.call('', 'mv', '-u', '/data/downloads', '/new/folder'),
])
def test_start(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.start(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.start.called_with((torrent_info_hash,))
def test_stop(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.close.return_value = 0
mocked_proxy.d.stop.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.stop(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.stop.called_with((torrent_info_hash,))
assert mocked_proxy.d.close.called_with((torrent_info_hash,))
@mock.patch('flexget.plugins.plugin_rtorrent.RTorrent')
class TestRTorrentOutputPlugin(object):
config = """
tasks:
test_add_torrent:
accept_all: yes
mock:
- {title: 'test', url: '""" + torrent_url + """'}
rtorrent:
action: add
start: yes
mkdir: yes
uri: http://localhost/SCGI
priority: high
path: /data/downloads
custom1: test_custom1
test_add_torrent_set:
accept_all: yes
set:
path: /data/downloads
custom1: test_custom1
priority: low
KrzysztofStachanczyk/Sensors-WWW-website | www/env/lib/python2.7/site-packages/django/db/migrations/operations/models.py | Python | gpl-3.0 | 29,026 | 0.002136
from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def reduce(self, operation, in_between, app_label=None):
return (
super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_model(self.name, app_label)
)
class CreateModel(ModelOperation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
super(CreateModel, self).__init__(name)
# Sanity-check that there are no duplicated field names, bases, or
# manager names
_check_for_duplicates('fields', (name for name, _ in self.fields))
_check_for_duplicates('bases', (
base._meta.label_lower if hasattr(base, '_meta') else
base.lower() if isinstance(base, six.string_types) else base
for base in self.bases
))
_check_for_duplicates('managers', (name for name, _ in self.managers))
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.remote_field:
if isinstance(field.remote_field.model, six.string_types):
strings_to_check.append(field.remote_field.model.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
def model_to_key(self, model):
"""
Take either a model class or an "app_label.ModelName" string
and return (app_label, object_name).
"""
if isinstance(model, six.string_types):
return model.split(".", 1)
else:
return model._meta.app_label, model._meta.object_name
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, DeleteModel) and
self.name_lower == operation.name_lower and
not self.options.get("proxy", False)):
return []
elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
return [
CreateModel(
operation.new_name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
if isinstance(operation, AddField):
# Don't allow optimizations of FKs through models they reference
if hasattr(operation.field, "remote_field") and operation.field.remote_field:
for between in in_between:
# Check that it doesn't point to the model
app_label, object_name = self.model_to_key(operation.field.remote_field.model)
if between.references_model(object_name, app_label):
return False
# Check that it's not through the model
if getattr(operation.field.remote_field, "through", None):
app_label, object_name = self.model_to_key(operation.field.remote_field.through)
if between.references_model(object_name, app_label):
return False
return [
CreateModel(
self.name,
fields=self.fields + [(operation.name, operation.field)],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, AlterField):
return [
CreateModel(
self.name,
fields=[
(n, operation.field if n == operation.name else v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RemoveField):
return [
CreateModel(
self.name,
fields=[
(n, v)
for n, v in self.fields
if n.lower() != operation.name_lower
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RenameField):
return [
CreateModel(
self.name,
fields=[
(operation.new_name if n == operation.old_name else n, v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
markreidvfx/pyaaf2 | tests/test_auid.py | Python | mit | 1,210 | 0.002479
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
from aaf2.auid import AUID
from uuid import UUID
import uuid
import unittest
class MobIDTests(unittest.TestCase):
def test_basic(self):
s = "0d010101-0101-2100-060e-2b3402060101"
v = AUID(s)
u = UUID(s)
assert str(v) == s
assert str(v.uuid) == s
assert v.uuid == u
def test_be(self):
s = "0d010101-0101-2100-060e-2b3402060101"
v = AUID(s)
u = UUID(s)
assert v.uuid.bytes == v.bytes_be
def test_int(self):
s = "0d010101-0101-2100-060e-2b3402060101"
v = AUID(s)
u = UUID(s)
assert v.int == u.int
v = AUID(int=100)
u = UUID(int=100)
assert v.int == u.int
for i in range(10):
u = uuid.uuid4()
a = AUID(int= u.int)
assert u.int == a.int
def test_noargs(self):
# expected behavour matches uuid.py
with self.assertRaises(TypeError):
AUID()
# print(v.int)
if __name__ == "__main__":
import logging
# logging.basicConfig(level=logging.DEBUG)
unittest.main()
biswajitsahu/kuma | vendor/packages/translate/storage/test_mo.py | Python | mpl-2.0 | 4,424 | 0.00859
#!/usr/bin/env python
import os
import subprocess
import sys
from cStringIO import StringIO
from translate.storage import factory, mo, test_base
# get directory of this test
dir = os.path.dirname(os.path.abspath(__file__))
# get top-level directory (moral equivalent of ../..)
dir = os.path.dirname(os.path.dirname(dir))
# load python modules from top-level
sys.path.insert(0, dir)
# add top-level to PYTHONPATH for subprocesses
os.environ["PYTHONPATH"] = os.pathsep.join(sys.path)
# add {top-level}/translate/tools to PATH for pocompile
os.environ["PATH"] = os.pathsep.join([os.path.join(dir, "translate", "tools"),
os.environ["PATH"]])
class TestMOUnit(test_base.TestTranslationUnit):
UnitClass = mo.mounit
posources = [
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
''',
# The following test is commented out, because the hash-size is different
# compared to gettext, since we're not counting untranslated units.
#r'''
#msgid ""
#msgstr ""
#"PO-Revision-Date: 2006-02-09 23:33+0200\n"
#"MIME-Version: 1.0\n"
#"Content-Type: text/plain; charset=UTF-8\n"
#"Content-Transfer-Encoding: 8-bit\n"
#
#msgid "plant"
#msgstr ""
#
#msgid ""
#"_: Noun\n"
#"convert"
#msgstr "bekeerling"
#''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transf
|
er-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
msgid ""
"_: Noun\n"
"convert"
msgstr "bekeerling"
msgctxt "verb"
msgid ""
"convert"
msgstr "omskakel"
''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-0
|
9 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
msgid ""
"_: Noun\n"
"convert"
msgstr "bekeerling"
msgctxt "verb"
msgid ""
"convert"
msgstr "omskakel"
msgid "tree"
msgid_plural "trees"
msgstr[0] ""
''']
class TestMOFile(test_base.TestTranslationStore):
StoreClass = mo.mofile
def get_mo_and_po(self):
return (os.path.abspath(self.filename + '.po'),
os.path.abspath(self.filename + '.msgfmt.mo'),
os.path.abspath(self.filename + '.pocompile.mo'))
def remove_po_and_mo(self):
for file in self.get_mo_and_po():
if os.path.exists(file):
os.remove(file)
def setup_method(self, method):
test_base.TestTranslationStore.setup_method(self, method)
self.remove_po_and_mo()
def teardown_method(self, method):
test_base.TestTranslationStore.teardown_method(self, method)
self.remove_po_and_mo()
def test_language(self):
"""Test that we can return the target language correctly."""
store = self.StoreClass()
store.updateheader(add=True, Language="zu")
assert store.gettargetlanguage() == "zu"
def test_output(self):
for posource in posources:
print("PO source file")
print(posource)
PO_FILE, MO_MSGFMT, MO_POCOMPILE = self.get_mo_and_po()
out_file = open(PO_FILE, 'w')
out_file.write(posource)
out_file.close()
subprocess.call(['msgfmt', PO_FILE, '-o', MO_MSGFMT])
subprocess.call(['pocompile', '--errorlevel=traceback', PO_FILE, MO_POCOMPILE])
store = factory.getobject(StringIO(posource))
if store.isempty() and not os.path.exists(MO_POCOMPILE):
# pocompile doesn't create MO files for empty PO files, so we
# can skip the checks here.
continue
mo_msgfmt_f = open(MO_MSGFMT)
mo_pocompile_f = open(MO_POCOMPILE)
try:
mo_msgfmt = mo_msgfmt_f.read()
print("msgfmt output:")
print(repr(mo_msgfmt))
mo_pocompile = mo_pocompile_f.read()
print("pocompile output:")
print(repr(mo_pocompile))
assert mo_msgfmt == mo_pocompile
finally:
mo_msgfmt_f.close()
mo_pocompile_f.close()
|
JonnyJD/rtslib-fb
|
rtslib/node.py
|
Python
|
agpl-3.0
| 8,950
| 0.001453
|
'''
Implements the base CFSNode class and a few inherited variants.
This file is part of RTSLib Community Edition.
Copyright (c) 2011 by RisingTide Systems LLC
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, version 3 (AGPLv3).
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import stat
from utils import fread, fwrite, RTSLibError, RTSLibNotInCFS
class CFSNode(object):
# Where do we store the fabric modules spec files ?
spec_dir = "/var/lib/target/fabric"
# Where is the configfs base LIO directory ?
configfs_dir = '/sys/kernel/config/target'
# TODO: Make the ALUA path generic, not iscsi-centric
# What is the ALUA directory ?
alua_metadata_dir = "/var/lib/target/alua/iSCSI"
# CFSNode private stuff
def __init__(self):
self._path = self.configfs_dir
def __str__(self):
return self.path
def _get_path(self):
return self._path
def _create_in_cfs_ine(self, mode):
'''
        Creates the configFS node if it does not already exist, depending on
        the mode.
        any -> makes sure it exists; also works if the node already exists
        lookup -> make sure it does NOT exist
        create -> create the node, which must not exist beforehand
Upon success (no exception raised), self._fresh is True if a node was
created, else self._fresh is False.
'''
if mode not in ['any', 'lookup', 'create']:
raise RTSLibError("Invalid mode: %s" % mode)
if self.exists and mode == 'create':
raise RTSLibError("This %s already exists in configFS."
% self.__class__.__name__)
elif not self.exists and mode == 'lookup':
raise RTSLibNotInCFS("No such %s in configfs: %s."
% (self.__class__.__name__, self.path))
if not self.exists:
os.mkdir(self.path)
self._fresh = True
else:
self._fresh = False
def _exists(self):
return os.path.isdir(self.path)
def _check_self(self):
if not self.exists:
raise RTSLibNotInCFS("This %s does not exist in configFS."
% self.__class__.__name__)
def _is_fresh(self):
return self._fresh
def _list_files(self, path, writable=None):
'''
List files under a path depending on their owner's write permissions.
@param path: The path under which the files are expected to be. If the
path itself is not a directory, an empty list will be returned.
@type path: str
@param writable: If None (default), returns all parameters, if True,
returns read-write parameters, if False, returns just the read-only
parameters.
@type writable: bool or None
@return: List of file names filtered according to their write perms.
'''
if not os.path.isdir(path):
return []
if writable is None:
names = os.listdir(path)
elif writable:
names = [name for name in os.listdir(path)
if (os.stat("%s/%s" % (path, name))[stat.ST_MODE] \
& stat.S_IWUSR)]
else:
names = [os.path.basename(name) for name in os.listdir(path)
if not (os.stat("%s/%s" % (path, name))[stat.ST_MODE] \
& stat.S_IWUSR)]
names.sort()
return names
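    # Illustrative sketch (added comment, not from the original module): with the
    # filtering above, writable=True keeps only the files the owner can write, e.g.
    #   node._list_files("%s/attrib" % node.path, writable=True)
    # would list just the read-write attribute files of this node.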
# CFSNode public stuff
def list_parameters(self, writable=None):
'''
@param writable: If None (default), returns all parameters, if True,
returns read-write parameters, if False, returns just the read-only
parameters.
@type writable: bool or None
@return: The list of existing RFC-3720 parameter names.
'''
self._check_self()
path = "%s/param" % self.path
return self._list_files(path, writable)
def list_attributes(self, writable=None):
'''
@param writable: If None (default), returns all attributes, if True,
returns read-write attributes, if False, returns just the read-only
attributes.
@type writable: bool or None
@return: A list of existing attribute names as strings.
'''
self._check_self()
path = "%s/attrib" % self.path
return self._list_files(path, writable)
def set_attribute(self, attribute, value):
'''
Sets the value of a named attribute.
The attribute must exist in configFS.
@param attribute: The attribute's name. It is case-sensitive.
@type attribute: string
@param value: The attribute's value.
@type value: string
'''
self._check_self()
path = "%s/attrib/%s" % (self.path, str(attribute))
if not os.path.isfile(path):
raise RTSLibError("Cannot find attribute: %s."
% str(attribute))
else:
try:
fwrite(path, "%s" % str(value))
except IOError, msg:
msg = msg[1]
raise RTSLibError("Cannot set attribute %s: %s"
% (str(attribute), str(msg)))
def get_attribute(self, attribute):
'''
@param attribute: The attribute's name. It is case-sensitive.
@return: The named attribute's value, as a string.
'''
self._check_self()
path = "%s/attrib/%s" % (self.path, str(attribute))
if not os.path.isfile(path):
raise RTSLibError("Cannot find attribute: %s."
% str(attribute))
else:
return fread(path).strip()
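    # Hedged usage sketch (hypothetical attribute name, not taken from the source):
    #   node.set_attribute("demo_attribute", "1")
    #   assert node.get_attribute("demo_attribute") == "1"
    # set_attribute() writes the value into configfs; get_attribute() reads it back.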
def set_parameter(self, parameter, value):
'''
Sets the value of a named RFC-3720 parameter.
The parameter must exist in configFS.
@param parameter: The RFC-3720 parameter's name. It is case-sensitive.
@type parameter: string
@param value: The parameter's value.
@type value: string
'''
self._check_self()
path = "%s/param/%s" % (self.path, str(parameter))
if not os.path.isfile(path):
raise RTSLibError("Cannot find parameter: %s."
% str(parameter))
else:
try:
fwrite(path, "%s\n" % str(value))
except IOError, msg:
msg = msg[1]
raise RTSLibError("Cannot set parameter %s: %s"
% (str(parameter), str(msg)))
def get_parameter(self, parameter):
'''
@param parameter: The RFC-3720 parameter's name. It is case-sensitive.
@type parameter: string
@return: The named parameter value as a string.
'''
self._check_self()
path = "%s/param/%s" % (self.path, str(parameter))
if not os.path.isfile(path):
raise RTSLibError("Cannot find RFC-3720 parameter: %s."
% str(parameter))
else:
return fread(path).rstrip()
def delete(self):
'''
If the underlying configFS object does not exist, this method does
nothing. If the underlying configFS object exists, this method attempts
to delete it.
'''
if self.exists:
os.rmdir(self.path)
path = property(_get_path,
doc="Get the configFS object path.")
exists = property(_exists,
doc="Is True as long as the underlying configFS object exists. " \
+ "If the underlying configFS objects gets deleted " \
+ "either by
|
ashang/calibre
|
src/calibre/ebooks/mobi/debug/mobi6.py
|
Python
|
gpl-3.0
| 32,481
| 0.006373
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct, sys, os
from collections import OrderedDict, defaultdict
from lxml import html
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import (parse_index_record,
parse_tagx_section)
from calibre.ebooks.mobi.utils import (decode_hex_number, decint,
decode_tbs, read_font_record)
from calibre.utils.imghdr import what
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.debug.headers import TextRecord
class TagX(object): # {{{
def __init__(self, tag, num_values, bitmask, eof):
self.tag, self.num_values, self.bitmask, self.eof = (tag, num_values,
bitmask, eof)
self.num_of_values = num_values
self.is_eof = (self.eof == 1 and self.tag == 0 and self.num_values == 0
and self.bitmask == 0)
def __repr__(self):
return 'TAGX(tag=%02d, num_values=%d, bitmask=%r, eof=%d)' % (self.tag,
self.num_values, bin(self.bitmask), self.eof)
# }}}
class SecondaryIndexHeader(object): # {{{
def __init__(self, record):
self.record = record
raw = self.record.raw
# open('/t/index_header.bin', 'wb').write(raw)
if raw[:4] != b'INDX':
raise ValueError('Invalid Secondary Index Record')
self.header_length, = struct.unpack('>I', raw[4:8])
self.unknown1 = raw[8:16]
self.index_type, = struct.unpack('>I', raw[16:20])
self.index_type_desc = {0: 'normal', 2:
'inflection', 6: 'calibre'}.get(self.index_type, 'unknown')
self.idxt_start, = struct.unpack('>I', raw[20:24])
self.index_count, = struct.unpack('>I', raw[24:28])
self.index_encoding_num, = struct.unpack('>I', raw[28:32])
self.index_encoding = {65001: 'utf-8', 1252:
'cp1252'}.get(self.index_encoding_num, 'unknown')
if self.index_encoding == 'unknown':
raise ValueError(
'Unknown index encoding: %d'%self.index_encoding_num)
self.unknown2 = raw[32:36]
self.num_index_entries, = struct.unpack('>I', raw[36:40])
self.ordt_start, = struct.unpack('>I', raw[40:44])
self.ligt_start, = struct.unpack('>I', raw[44:48])
self.num_of_ligt_entries, = struct.unpack('>I', raw[48:52])
self.num_of_cncx_blocks, = struct.unpack('>I', raw[52:56])
self.unknown3 = raw[56:180]
self.tagx_offset, = struct.unpack(b'>I', raw[180:184])
if self.tagx_offset != self.header_length:
raise ValueError('TAGX offset and header length disagree')
self.unknown4 = raw[184:self.header_length]
tagx = raw[self.header_length:]
if not tagx.startswith(b'TAGX'):
raise ValueError('Invalid TAGX section')
self.tagx_header_length, = struct.unpack('>I', tagx[4:8])
self.tagx_control_byte_count, = struct.unpack('>I', tagx[8:12])
self.tagx_entries = [TagX(*x) for x in parse_tagx_section(tagx)[1]]
if self.tagx_entries and not self.tagx_entries[-1].is_eof:
raise ValueError('TAGX last entry is not EOF')
idxt0_pos = self.header_length+self.tagx_header_length
num = ord(raw[idxt0_pos])
count_pos = idxt0_pos+1+num
self.last_entry = raw[idxt0_pos+1:count_pos]
self.ncx_count, = struct.unpack(b'>H', raw[count_pos:count_pos+2])
# There may be some alignment zero bytes between the end of the idxt0
# and self.idxt_start
idxt = raw[self.idxt_start:]
if idxt[:4] != b'IDXT':
raise ValueError('Invalid IDXT header')
length_check, = struct.unpack(b'>H', idxt[4:6])
if length_check != self.header_length + self.tagx_header_length:
raise ValueError('Length check failed')
if idxt[6:].replace(b'\0', b''):
raise ValueError('Non null trailing bytes after IDXT')
def __str__(self):
ans = ['*'*20 + ' Secondary Index Header '+ '*'*20]
a = ans.append
def u(w):
a('Unknown: %r (%d bytes) (All zeros: %r)'%(w,
len(w), not bool(w.replace(b'\0', b''))))
a('Header length: %d'%self.header_length)
u(self.unknown1)
a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type))
a('Offset to IDXT start: %d'%self.idxt_start)
a('Number of index records: %d'%self.index_count)
a('Index encoding: %s (%d)'%(self.index_encoding,
self.index_encoding_num))
u(self.unknown2)
a('Number of index entries: %d'% self.num_index_entries)
a('ORDT start: %d'%self.ordt_start)
a('LIGT start: %d'%self.ligt_start)
a('Number of LIGT entries: %d'%self.num_of_ligt_entries)
a('Number of cncx blocks: %d'%self.num_of_cncx_blocks)
u(self.unknown3)
a('TAGX offset: %d'%self.tagx_offset)
u(self.unknown4)
a('\n\n')
a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20)
a('Header length: %d'%self.tagx_header_length)
a('Control byte count: %d'%self.tagx_control_byte_count)
for i in self.tagx_entries:
a('\t' + repr(i))
a('Index of last IndexEntry in secondary index record: %s'% self.last_entry)
a('Number of entries in the NCX: %d'% self.ncx_count)
return '\n'.join(ans)
# }}}
class IndexHeader(object): # {{{
def __init__(self, record):
self.record = record
raw = self.record.raw
# open('/t/index_header.bin', 'wb').write(raw)
if raw[:4] != b'INDX':
raise ValueError('Invalid Primary Index Record')
self.header_length, = struct.unpack('>I', raw[4:8])
self.unknown1 = raw[8:12]
self.header_type, = struct.unpack('>I', raw[12:16])
self.index_type, = struct.unpack('>I', raw[16:20])
self.index_type_desc = {0: 'normal', 2:
'inflection', 6: 'calibre'}.get(self.index_type, 'unknown')
self.idxt_start, = struct.unpack('>I', raw[20:24])
self.index_count, = struct.unpack('>I', raw[24:28])
self.index_encoding_num, = struct.unpack('>I', raw[28:32])
self.index_encoding = {65001: 'utf-8', 1252:
'cp1252'}.get(self.index_encoding_num, 'unknown')
if self.index_encoding == 'unknown':
raise ValueError(
'Unknown index encoding: %d'%self.index_encoding_num)
self.possibly_language = raw[32:36]
self.num_index_entries, = struct.unpack('>I', raw[36:40])
self.ordt_start, = struct.unpack('>I', raw[40:44])
self.ligt_start, = struct.unpack('>I', raw[44:48])
self.num_of_ligt_entries, = struct.unpack('>I', raw[48:52])
self.num_of_cncx_blocks, = struct.unpack('>I', raw[52:56])
self.unknown2 = raw[56:180]
        self.tagx_offset, = struct.unpack(b'>I', raw[180:184])
if self.tagx_offset != self.header_length:
raise ValueError('TAGX offset and header length disagree')
self.unknown3 = raw[184:self.header_length]
tagx = raw[self.header_length:]
        if not tagx.startswith(b'TAGX'):
raise ValueError('Invalid TAGX section')
self.tagx_header_length, = struct.unpack('>I', tagx[4:8])
self.tagx_control_byte_count, = struct.unpack('>I', tagx[8:12])
self.tagx_entries = [TagX(*x) for x in parse_tagx_section(tagx)[1]]
if self.tagx_entries and not self.tagx_entries[-1].is_eof:
raise ValueError('TAGX last entry is not EOF')
idxt0_pos = self.header_length+self.tagx_header_length
last_num, consumed = decode_hex_number(raw[idxt0_pos:])
count_pos = idxt0_pos + consumed
self.ncx_count, = struct.unpack(b'>H', raw[count_pos:count_pos+2])
self.last_entry = last_num
if last_num != sel
|
darky83/E.F.A.-2.x.x.x
|
build/Sphinx/sphinxapi.py
|
Python
|
gpl-3.0
| 34,597
| 0.05434
|
#
# $Id$
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2013, Andrew Aksyonoff
# Copyright (c) 2008-2013, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x11E
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x103
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x101
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
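# Added illustrative note (not in the original file): extended-mode queries pick one
# of the ranking constants above; upstream sphinxapi exposes a setter for it, so a
# caller would do something like this (assumption — the setter lives outside this excerpt):
#   cl = SphinxClient()
#   cl.SetRankingMode(SPH_RANK_BM25)   # faster, statistics-only BM25 ranking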
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_FILTER_STRING = 3
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_FACTORS = 1001
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
        self._offset = 0 # how many records to seek from result-set start (default is 0)
        self._limit = 20 # how many records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0 # per-query max_predicted_time
self._outerorderby = '' # outer match sort by
self._outeroffset = 0 # outer offset
self._outerlimit = 0 # outer limit
self._hasouter = False # sub-select enabled
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
self._host = host
if isinstance(port, int):
assert(port>0 and port<65536)
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
Set connection timeout ( float second )
"""
assert (isinstance(timeout, float))
        # setting the timeout to 0 would make the connection non-blocking, which is wrong here, so clip it to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
            # this is how an alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
v = unpack('>L', sock.recv(4))
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if st
|
syllog1sm/TextBlob
|
text/nltk/tag/simplify.py
|
Python
|
mit
| 3,411
| 0.004398
|
# Natural Language Toolkit: POS Tag Simplification
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Brown Corpus
# http://khnt.hit.uib.no/icame/manuals/brown/INDEX.HTM
brown_mapping1 = {
'j': 'ADJ', 'p': 'PRO', 'm': 'MOD', 'q': 'DET',
'w': 'WH', 'r': 'ADV', 'i': 'P',
'u': 'UH', 'e': 'EX', 'o': 'NUM', 'b': 'V',
'h': 'V', 'f': 'FW', 'a': 'DET', 't': 'TO',
'cc': 'CNJ', 'cs': 'CNJ', 'cd': 'NUM',
'do': 'V', 'dt': 'DET',
'nn': 'N', 'nr': 'N', 'np': 'NP', 'nc': 'N',
'--': '--'
}
brown_mapping2 = {
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG', 'vbn': 'VN'
}
def simplify_brown_tag(tag):
tag = tag.lower()
if tag[0] in brown_mapping1:
return brown_mapping1[tag[0]]
elif tag[:2] in brown_mapping1: # still doesn't handle DOD tag correctly
return brown_mapping1[tag[:2]]
try:
if '-' in tag:
tag = tag.split('-')[0]
return brown_mapping2[tag]
except KeyError:
return tag.upper()
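# Illustrative examples (added, not in the original module):
#   simplify_brown_tag('jj')      # -> 'ADJ'  (first-letter lookup in brown_mapping1)
#   simplify_brown_tag('vbd-hl')  # -> 'VD'   (hyphen suffix stripped, then brown_mapping2)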
# Wall Street Journal tags (Penn Treebank)
wsj_mapping = {
'-lrb-': '(', '-rrb-': ')', '-lsb-': '(',
'-rsb-': ')', '-lcb-': '(', '-rcb-': ')',
'-none-': '', 'cc': 'CNJ', 'cd': 'NUM',
'dt': 'DET', 'ex': 'EX', 'fw': 'FW', # existential "there", foreign word
'in': 'P', 'jj': 'ADJ', 'jjr': 'ADJ',
'jjs': 'ADJ', 'ls': 'L', 'md': 'MOD', # list item marker
'nn': 'N', 'nnp': 'NP', 'nnps': 'NP',
'nns': 'N', 'pdt': 'DET', 'pos': '',
'prp': 'PRO', 'prp$': 'PRO', 'rb': 'ADV',
'rbr': 'ADV', 'rbs': 'ADV', 'rp': 'PRO',
'sym': 'S', 'to': 'TO', 'uh': 'UH',
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG',
'vbn': 'VN', 'vbp': 'V', 'vbz': 'V',
'wdt': 'WH', 'wp': 'WH', 'wp$': 'WH',
'wrb': 'WH',
'bes': 'V', 'hvs': 'V', 'prp^vbp': 'PRO' # additions for NPS Chat corpus
}
def simplify_wsj_tag(tag):
if tag and tag[0] == '^':
tag = tag[1:]
try:
tag = wsj_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
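# Illustrative examples (added, not in the original module):
#   simplify_wsj_tag('NNS')   # -> 'N'
#   simplify_wsj_tag('^VBD')  # -> 'VD'  (leading '^' is stripped first)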
indian_mapping = {
'nn': 'N', 'vm': 'MOD', 'jj': 'ADJ', 'nnp': 'NP',
'prp': 'PRO', 'prep': 'PRE', 'vaux': 'V', 'vfm': 'V',
'cc': 'CNJ', 'nnpc': 'NP', 'nnc': 'N', 'qc': 'QC',
'dem': 'DET', 'vrb': 'V', 'qfnum': 'NUM', 'rb': 'ADV',
'qf': 'DET', 'punc': '.', 'rp': 'PRT', 'psp': 'PSP',
'nst': 'N', 'nvb': 'N', 'vjj': 'V', 'neg': 'NEG',
'vnn': 'V', 'xc': 'XC', 'intf': 'INTF', 'nloc': 'N',
'jvb': 'ADJ', 'wq': 'WH', 'qw': 'WH', 'jj:?': 'ADJ',
'"cc': 'CNJ', 'nnp,': 'NP', 'sym\xc0\xa7\xb7': 'SYM',
'symc': 'SYM'}
def simplify_indian_tag(tag):
if ':' in tag:
tag = tag.split(':')[0]
try:
tag = indian_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
# Alpino tags
alpino_mapping = {
'noun':'N', 'name': 'NP', 'vg': 'VG', 'punct':'.',
'verb':'V', 'pron': 'PRO', 'prep':'P'
}
def simplify_alpino_tag(tag):
try:
tag = alpino_mapping[tag]
except KeyError:
pass
return tag.upper()
# Default tag simplification
def simplify_tag(tag):
return tag[0].upper()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
ejordangottlieb/pyswmap
|
examples/basic_example.py
|
Python
|
mit
| 4,881
| 0.002868
|
#!/usr/bin/env python
import sys
sys.path.append('..')
from pyswmap import MapCalc
# A quick example showing current module capabilities:
# We create a new instance of class MapCalc and supply the BMR
# with the following values:
# 1. The IPv6 rule prefix: rulev6 (a string)
# 2. The IPv4 rule prefix: rulev4 (a string)
# 3. The PSID Offset: psidoffset (an integer)
# Note: In the absence of this value a default of 6 will be used
#
# One of the two following values:
# 4a. The Sharing Ratio: ratio (an integer)
# or
# 4b. The EA Length: ealen (an integer)
#
# This will result in both the calculated and validated class variables:
#
# m.rulev4: The IPv4 rule prefix used by a particular
# mapping rule.
#
# m.rulev6: The IPv6 rule prefix used by a particular
# mapping rule.
#
# m.rulev4mask: The number of bits in the IPv4 rule subnet
# mask.
#
# m.rulev6mask: The number of bits in the IPv6 rule subnet
# mask.
#
# m.ealen: The number of Embedded Address (EA) bits.
#
# m.ratio: The sharing ratio of # of users per IPv4
# address. This is 2 to the power of bits
# in the PSID field.
#
# m.psidoffset: The PSID Offset value. Defined as the
# "A" field in the IETF MAP specification.
#
# m.portbits: The number of contiguous ports as defined
# by the "m bits" in the IETF MAP
# specification.
#
# m.psidbits: The length in bits of the PSID field. It
# is defined as the "k bits" in the IETF MAP
# specification.
#
m = MapCalc( rulev6='fd80::/48',
rulev4='24.50.100.0/24',
#psidoffset=6,
ratio=64,
#ealen=14,
)
# Supply arbitrary layer-4 port that is valid given PSID Offset to
# gen_psid method. This will return the following value:
# psid: The port-set ID which defines the
# algorithmically assigned ports unique to
# a particular MAP CE.
portvalue = 40000
psid = m.gen_psid(portvalue)
# A single address from the IPv4 rule prefix
sharedv4 = '24.50.100.100'
# Supply the IPv4 address from IPv4 rule prefix and PSID to get_mapce_addr
# method and use them to return:
#
# mapce: The MAP IPv6 address. This address
# is used to reach the MAP functions
# of a provisioned/configured MAP CE.
mapce = m.get_mapce_addr(sharedv4,psid)
# Supply an IPv4 address from IPv4 rule prefix and PSID to get_mapce_prefix
# method and use them to return:
# pd: The end-user IPv6 prefix. Typically,
# but not exclusively DHCPv6 PD. Can
# also be assigned via SLAAC or configured
# manually.
pd = m.get_mapce_prefix(sharedv4,psid)
# Detailed definitions are available in draft-ietf-softwire-map.
# Print out some of the pertinent user supplied and calculated values
print("\n\n")
print("################################################")
print("BMR:")
print(" Rule IPv6 Prefix: {}".format(m.rulev6))
print(" Rule IPv4 Prefix: {}".format(m.rulev4))
print(" PSID Offset: {}".format(m.psidoffset))
print(" Sharing Ratio: {} to 1".format(m.ratio))
print(" EA Length: {}".format(m.ealen))
print("Shared IPv4 and Port Session State:")
print(" Shared IPv4: {}".format(sharedv4))
print(" Port: {}".format(portvalue))
print("Other Calculated Values:")
print(" Port Bits: {}".format(m.portbits))
print(" Ranges Allocated: {}".format(2**m.psidoffset - 1))
print(" PSID Bits: {}".format(m.psidbits))
print("################################################")
print("------------------------------------------------")
print("PSID: {}".format(psid))
print("PD for this client is: {}".format(pd))
print("MAP CE Address is: {}".format(mapce))
print("------------------------------------------------")
print("Output to follow will include the full range of ports assigned")
print("to calculated PSID.")
print("Note: This can result in a really long list up to 2^16")
raw_input = vars(__builtins__).get('raw_input',input)
raw_input("Press the ENTER/RETURN key to continue")
print("\n")
# Print out list of ports for session PSID
print(m.port_list(psid))
|
MihaZelnik/Django-Unchained
|
src/project/apps/web/models.py
|
Python
|
mit
| 1,716
| 0.000583
|
from django_gravatar.helpers import get_gravatar_url, has_gravatar
from django.contrib.auth.models import User
from django.db import models
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
def __unicode__(self):
return self.user.username
class Meta:
db_table = 'user_profile'
@property
def social(self):
return self.user.socialaccount_set.all().first()
@property
def provider(self):
if self.social:
return self.social.provider
return None
@property
def avatar(self):
if self.provider == 'twitter':
            return self._get_twitter_avatar('400x400')
else:
return self._get_gravatar(100)
@property
def avatar_small(self):
if self.provider == 'twitter':
return self._get_twitter_avatar('normal')
else:
return self._get_gravatar(30)
@property
def bio(self):
if self.social:
return self.social.extra_data.get('description')
return None
def _get_twitter_avatar(self, size):
if self.social:
url = self.social.get_avatar_url()
file_name = self.social.get_avatar_url().split('/')[-1]
new_file_name = file_name.replace('.', '_%s.' % size)
return url.replace(file_name, new_file_name)
return None
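    # Illustrative effect of the filename rewrite above (hypothetical URL, added comment):
    #   .../12345_normal.jpg  ->  .../12345_normal_400x400.jpg
    # i.e. the requested size is spliced in just before the file extension.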
def _get_gravatar(self, size):
if has_gravatar(self.user.email):
return get_gravatar_url(self.user.email, size=size)
else:
return "http://robohash.org/%s.png" % self.user.email
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
|
exactassembly/cerise
|
app/project/models.py
|
Python
|
gpl-2.0
| 646
| 0.006192
|
from bson import ObjectId  # needed for the ObjectIdField default below

from ..app import db
class Step(db.EmbeddedDocument):
action = db.StringField(max_length=255)
workdir = db.StringField(max_length=255)
class SubProject(db.EmbeddedDocument):
id = db.ObjectIdField(required=True, default=lambda: ObjectId())
name = db.StringField(max_length=255)
url = db.StringField(max_length=255)
steps = db.EmbeddedDocumentListField(Step, max_length=25)
class Project(db.EmbeddedDocument):
name = db.StringField(max_length=255)
url = db.StringField(max_length=255)
subs = db.EmbeddedDocumentListField(SubProject, max_length=25)
steps = db.EmbeddedDocumentListField(Step, max_length=25)
|
cloudbase/neutron-virtualbox
|
neutron/tests/unit/openvswitch/test_ovs_tunnel.py
|
Python
|
apache-2.0
| 28,244
| 0
|
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import time
import mock
from oslo_config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 420
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS)
LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
VM_DEVICE_OWNER = "compute:None"
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort(object):
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding(object):
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(base.BaseTestCase):
USE_VETH_INTERCONNECTION = False
VETH_MTU = None
def setUp(self):
super(TunnelTest, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tun_br_map'
self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_INT_OFPORT = 33333
self.MAP_TUN_PHY_OFPORT = 44444
self.inta = mock.Mock()
self.intb = mock.Mock()
self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(),
self.TUN_BRIDGE: mock.Mock(),
self.MAP_TUN_BRIDGE: mock.Mock(),
}
self.ovs_int_ofports = {
'patch-tun': self.TUN_OFPORT,
'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
}
self.mock_bridge = mock.patch.object(ovs_lib, 'OVSBridge').start()
self.mock_bridge.side_effect = (lambda br_name:
                                        self.ovs_bridges[br_name])
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
self.mock_int_bridge.add_patch_port.side_effect = (
lambda tap, peer: self.ovs_int_ofports[tap])
        self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_map_tun_bridge.add_patch_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.device_exists = mock.patch.object(ip_lib, 'device_exists').start()
self.device_exists.return_value = True
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.get_bridges = mock.patch.object(ovs_lib.BaseOVS,
'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE]
self.execute = mock.patch('neutron.agent.linux.utils.execute').start()
self._define_expected_calls()
def _define_expected_calls(self):
self.mock_bridge_expected = [
mock.call(self.INT_BRIDGE),
mock.call(self.MAP_TUN_BRIDGE),
mock.call(self.TUN_BRIDGE),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.delete_port('patch-tun'),
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.add_flow(priority=0, table=constants.CANARY_TABLE,
actions='drop'),
]
self.mock_map_tun_bridge_expected = [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_INT_OFPORT,
actions='drop'),
mock.call.set_db_attribute(
'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_map_tun_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_PHY_OFPORT,
actions='drop'),
mock.call.set_db_attribute(
'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(secure_mode=True),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_tun_bridge_expected += [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN,
in_port=self.INT_OFPORT),
mock.call.add_flow(priority=0, actions="drop"),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
dl_dst=UCAST_MAC,
actions="resubmit(,%s)" %
constants.UCAST_TO_TUN),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
|
FrodeSolheim/fs-uae-launcher
|
launcher/apps/dosbox_fs.py
|
Python
|
gpl-2.0
| 276
| 0
|
import sys
from fsgamesys.plugins.pluginmanager import PluginManager
"""
DOSBox-FS launcher script used for testing.
"""
def app_main():
    executable = PluginManager.instance().find_executable("dosbox-fs")
    process = executable.popen(sys.argv[1:])
process.wait()
|
a10networks/acos-client
|
acos_client/version.py
|
Python
|
apache-2.0
| 641
| 0
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
VERSION = '2.9.0'
|
oscarbranson/apt-tools
|
apt_importers.py
|
Python
|
gpl-2.0
| 4,972
| 0.007039
|
import pandas as pd
import struct
def read_pos(f):
""" Loads an APT .pos file as a pandas dataframe.
Columns:
x: Reconstructed x position
y: Reconstructed y position
z: Reconstructed z position
Da: mass/charge ratio of ion"""
# read in the data
n = len(file(f).read())/4
d = struct.unpack('>'+'f'*n,file(f).read(4*n))
# '>' denotes 'big-endian' byte order
# unpack data
pos = pd.DataFrame({'x': d[0::4],
'y': d[1::4],
'z': d[2::4],
'Da': d[3::4]})
return pos
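# Hedged usage sketch (hypothetical file name, not part of the module):
#   pos = read_pos('example.pos')
#   pos['Da'].hist(bins=512)   # quick mass-to-charge spectrum of the reconstruction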
def read_epos(f):
"""Loads an APT .epos file as a pandas dataframe.
Columns:
x: Reconstructed x position
y: Reconstructed y position
z: Reconstructed z position
Da: Mass/charge ratio of ion
ns: Ion Time Of Flight
DC_kV: Potential
pulse_kV: Size of voltage pulse (voltage pulsing mode only)
det_x: Detector x position
det_y: Detector y position
pslep: Pulses since last event pulse (i.e. ionisation rate)
ipp: Ions per pulse (multihits)
[x,y,z,Da,ns,DC_kV,pulse_kV,det_x,det_y,pslep,ipp].
pslep = pulses since last event pulse
ipp = ions per pulse
When more than one ion is recorded for a given pulse, only the
    first event will have an entry in the "Pulses since last event
pulse" column. Each subsequent event for that pulse will have
an entry of zero because no additional pulser firings occurred
before that event was recorded. Likewise, the "Ions Per Pulse"
column will contain the total number of recorded ion events for
a given pulse. This is normally one, but for a sequence of records
    for a pulse with multiply recorded ions, the first ion record will
have the total number of ions measured in that pulse, while the
remaining records for that pulse will have 0 for the Ions Per
Pulse value.
~ Appendix A of 'Atom Probe tomography: A Users Guide',
notes on ePOS format."""
# read in the data
n = len(file(f,'rb').read())/4
rs = n / 11
d = struct.unpack('>'+'fffffffffII'*rs,file(f,'rb').read(4*n))
# '>' denotes 'big-endian' byte order
# unpack data
pos = pd.DataFrame({'x': d[0::11],
'y': d[1::11],
'z': d[2::11],
'Da': d[3::11],
'ns': d[4::11],
'DC_kV': d[5::11],
'pulse_kV': d[6::11],
'det_x': d[7::11],
'det_y': d[8::11],
'pslep': d[9::11], # pulses since last event pulse
'ipp': d[10::11]}) # ions per pulse
return pos
def read_rrng(f):
"""Loads a .rrng file produced by IVAS. Returns two dataframes of 'ions'
and 'ranges'."""
import re
rf = open(f,'r').readlines()
patterns = re.compile(r'Ion([0-9]+)=([A-Za-z0-9]+).*|Range([0-9]+)=(\d+.\d+) +(\d+.\d+) +Vol:(\d+.\d+) +([A-Za-z:0-9 ]+) +Color:([A-Z0-9]{6})')
ions = []
rrngs = []
for line in rf:
m = patterns.search(line)
if m:
if m.groups()[0] is not None:
ions.append(m.groups()[:2])
else:
rrngs.append(m.groups()[2:])
ions = pd.DataFrame(ions, columns=['number','name'])
ions.set_index('number',inplace=True)
rrngs = pd.DataFrame(rrngs, columns=['number','lower','upper','vol','comp','colour'])
rrngs.set_index('number',inplace=True)
    rrngs[['lower','upper','vol']] = rrngs[['lower','upper','vol']].astype(float)
    rrngs[['comp','colour']] = rrngs[['comp','colour']].astype(str)
return ions,rrngs
def label_ions(pos,rrngs):
"""labels ions in a .pos or .epos dataframe (anything with a 'Da' column)
with composition and colour, based on an imported .rrng file."""
pos['comp'] = ''
pos['colour'] = '#FFFFFF'
for n,r in rrngs.iterrows():
pos.loc[(pos.Da >= r.lower) & (pos.Da <= r.upper),['comp','colour']] = [r['comp'],'#' + r['colour']]
return pos
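# Hedged end-to-end sketch (hypothetical file names, added for illustration):
#   ions, rrngs = read_rrng('ranges.rrng')
#   lpos = label_ions(read_pos('data.pos'), rrngs)   # adds 'comp' and 'colour' columns
#   dpos = deconvolve(lpos)                          # splits complex ions into per-element rows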
def deconvolve(lpos):
"""Takes a composition-labelled pos file, and deconvolves
the complex ions. Produces a dataframe of the same input format
with the extra columns:
'element': element name
'n': stoichiometry
For complex ions, the location of the different components is not
altered - i.e. xyz position will be the same for several elements."""
import re
out = []
pattern = re.compile(r'([A-Za-z]+):([0-9]+)')
for g,d in lpos.groupby('comp'):
        if g != '':
for i in range(len(g.split(' '))):
tmp = d.copy()
cn = pattern.search(g.split(' ')[i]).groups()
tmp['element'] = cn[0]
tmp['n'] = cn[1]
out.append(tmp.copy())
return pd.concat(out)
|
perfidia/screensketch
|
src/screensketch/screenspec/reader/__init__.py
|
Python
|
mit
| 54
| 0
|
from text import TextReader
from xml import XMLReader
|
MaxTyutyunnikov/lino
|
lino/utils/requests.py
|
Python
|
gpl-3.0
| 1,997
| 0.027541
|
## Copyright 2009 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import os
def again(request,*args,**kw):
    get = request.GET.copy()
    for k,v in kw.items():
        if v is None: # value None means "remove this key"
if get.has_key(k):
del get[k]
else:
get[k] = v
path = request.path
if len(args):
path += "/" + "/".join(args)
path = os.path.normpath(path)
path = path.replace("\\","/")
s = get.urlencode()
if len(s):
path += "?" + s
#print pth
return path
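# Illustrative behaviour (hypothetical request, added comment): for a GET to
# "/contacts?page=2", again(request, page=None, sort="name") drops the "page"
# parameter, keeps the path, and returns something like "/contacts?sort=name".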
def get_redirect(request):
if hasattr(request,"redirect_to"):
return request.redirect_to
def redirect_to(request,url):
request.redirect_to = url
#~ def is_editing(request):
#~ editing = request.GET.get("editing",None)
#~ if editing is None:
#~ path = request.session.get("editing",None)
#~ else:
#~ editing = int(editing)
#~ if editing:
#~ request.session["editing"] = path = request.path
#~ else:
#~ request.session["editing"] = path = None
#~ if request.path == path:
#~ return True
#~ request.session["editing"] = None
#~ return False
#~ def stop_editing(request):
#~ request.session["editing"] = None
#~ def start_editing(request):
#~ request.session["editing"] = request.path
|
agry/NGECore2
|
scripts/mobiles/yavin4/geonosian_bunker/enhanced_kliknik.py
|
Python
|
lgpl-3.0
| 1,688
| 0.026659
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
    mobileTemplate.setCreatureName('geonosian_kliknik_force_strong')
mobileTemplate.setLevel(89)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(45)
mobileTemplate.setHideType("Scaley Meat")
mobileTemplate.setHideAmount(40)
mobileTemplate.setSocialGroup("geonosian bunker")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_kliknik_hue.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/base/shared_creature_base.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_5')
attacks.add('bm_damage_poison_5')
attacks.add('bm_dampen_pain_5')
attacks.add('bm_slash_5')
mobileTemplate.setDefaultAttack('creatureRangedAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('enhanced_kliknik', mobileTemplate)
return
|
amadeusproject/amadeuslms
|
analytics/tests/test_general_dashboard.py
|
Python
|
gpl-2.0
| 4,202
| 0.031392
|
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Learning Management System program, or simply Amadeus LMS.
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.test import TestCase, Client, override_settings
from django.core.urlresolvers import resolve
from reports.views import ReportView
from subjects.models import Subject, Tag
from users.models import User
from topics.models import Topic, Resource
from chat.models import Conversation, TalkMessages
from categories.models import Category
from datetime import datetime
from log.models import Log
from django.db.models import Q
from ..views import most_used_tags, most_accessed_subjects
from django.http import HttpResponse, JsonResponse
class APIDashBoardTest(TestCase):
def setUp(self):
self.c = Client()
self.student = User.objects.create(username = "student01", email= "student01@amadeus.br")
self.student.set_password("amadeus") #because of the hash function used
self.student.save()
if self.c.login(email="student01@amadeus.br", password="amadeus"):
print("student01 logged in")
self.student02 = User.objects.create(username= "student02", email = "student02@amadeus.br")
self.student02.set_password("amadeus")
self.student02.save()
self.category = Category(name= "category 01")
self.category.save()
def test_most_used_tags(self):
"""
        testing if it catches all tags used in a resource and in a subject
"""
t = Tag(name="felipe")
t.save()
t1 = Tag(name="b2")
t1.save()
s1 = Subject.objects.create(name="subject", visible= True, init_date= datetime.now(), end_date= datetime.now(),
subscribe_begin = datetime.now(), subscribe_end= datetime.now() )
s1.tags.add(t)
s1.save()
s2 = Subject.objects.create(name="subject dois", visible= True, init_date= datetime.now(), end_date= datetime.now(),
subscribe_begin = datetime.now(), subscribe_end= datetime.now() )
s2.tags.add(t)
s2.save()
r1 = Resource.objects.create(name="resource um")
r1.tags.add(t1)
r1.save()
        expected_data = [{'name': 'felipe', 'count': 2}, {'name':'b2', 'count': 1}]
data = self.c.get('/analytics/most_used_tags/')
self.assertEqual(data.status_code, 200 )
self.assertJSONEqual(str(data.content, encoding='UTF-8'), expected_data)
@override_settings(STATICFILES_STORAGE = None) # added decorator
def test_most_accessed_subjects(self):
"""
        test that we collect the correct number of accesses to a subject
"""
s1 = Subject.objects.create(name="subject", visible= True, init_date= datetime.now(), end_date= datetime.now(),
subscribe_begin = datetime.now(), subscribe_end= datetime.now() )
s1.students.add(self.student)
s1.students.add(self.student02)
s1.category = self.category
s1.save()
response = self.c.get('/subjects/view/'+str(s1.slug)+'/')
print(response)
self.assertEqual(response.status_code, 200)
self.c.logout() #logout student one
if self.c.login(email="student02@amadeus.br", password="amadeus"):
print("student02 logged in")
response = self.c.get('/subjects/view/'+str(s1.slug)+'/')
self.assertEqual(response.status_code, 200)
response = self.c.get('/analytics/most_accessed_subjects/')
self.assertEqual(response.status_code, 200)
expected_data = [{'name': s1.name, 'count': 2}]
self.assertJSONEqual(str(response.content, encoding = 'UTF-8'), expected_data)
def test_most_accessed_categories(self):
self.fail("finish test on categories")
def test_most_active_users(self):
self.fail("finish the test")
|
splee/bigdoorkit
|
tests/test_resource.py
|
Python
|
mit
| 3,184
| 0.003769
|
import os
from nose.tools import assert_equal
from nose import SkipTest
from unittest import TestCase
from tests import TEST_APP_KEY, TEST_APP_SECRET
from bigdoorkit.client import Client
from bigdoorkit.resources.level import NamedLevelCollection, NamedLevel
from bigdoorkit.resources.award import NamedAwardCollection, NamedAward
from bigdoorkit.resources.good import NamedGoodCollection, NamedGood
class TestNamedLevelCollection(TestCase):
def setUp(self):
self.client = Client(TEST_APP_SECRET, TEST_APP_KEY)
def test_get(self):
""" test of getting a list of named level collections """
named_level_collections = NamedLevelCollection.all(self.client)
assert len(named_level_collections) == 1
nlc = named_level_collections[0]
assert_equal(nlc.pub_title, 'test title')
assert_equal(nlc.pub_description, 'test description')
assert_equal(nlc.end_user_title, 'test user title')
assert_equal(nlc.end_user_description, 'test user description')
assert_equal(nlc.currency_id, 4920)
class TestNamedLevel(TestCase):
def setUp(self):
self.client = Client(TEST_APP_SECRET, TEST_APP_KEY)
def test_get(self):
named_level = NamedLevel.get(7859, self.client)
assert named_level
assert named_level.named_level_collection_id
assert_equal(named_level.named_level_collection_id, 2092)
class TestNamedAward(TestCase):
def setUp(self):
self.client = Client(TEST_APP_SECRET, TEST_APP_KEY)
def test_get(self):
named_award = NamedAward.get(7193, self.client)
assert named_award.named_award_collection_id
assert_equal(named_award.named_award_collection_id, 1920)
assert_equal(named_award.pub_title, 'obligatory early achievement')
assert_equal(named_award.pub_description, 'early achievement')
assert_equal(named_award.end_user_title, 'just breath')
assert_equal(named_award.end_user_description, 'congratulations')
assert_equal(named_award.id, 7193)
class TestNamedAwardCollection(TestCase):
def setUp(self):
self.client = Client(TEST_APP_SECRET, TEST_APP_KEY)
def test_all(self):
nac = NamedAwardCollection.all(self.client)
assert nac
assert len(nac) == 1
nac = nac[0]
assert_equal(nac.pub_title, 'application achievements')
assert_equal(nac.pub_description, 'a set of achievements that the '+\
'user can earn')
assert_equal(nac.end_user_title, 'achievements')
assert_equal(nac.end_user_description, 'things you can get')
assert_equal(nac.id, 1920)
class TestNamedGood(TestCase):
def setUp(self):
self.client = Client(TEST_APP_SECRET, TEST_APP_KEY)
def test_get(self):
ng = NamedGood.get(6169, self.client)
assert ng
assert_equal(ng.pub_title, 'example good')
assert_equal(ng.pub_description, 'something you can purchase')
assert_equal(ng.end_user_title, 'example good')
assert_equal(ng.end_user_description, 'something you can purchase')
assert_equal(ng.id, 6169)
|
sebrandon1/neutron
|
neutron/tests/unit/common/test_utils.py
|
Python
|
apache-2.0
| 30,791
| 0.00013
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import inspect
import os.path
import re
import sys
impor
|
t eventlet
import mock
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as exc
from oslo_log import log as logging
import six
import testscenarios
import testtools
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.tests import base
from neutron.tests.common imp
|
ort helpers
from neutron.tests.unit import tests
load_tests = testscenarios.load_tests_apply_scenarios
class TestParseMappings(base.BaseTestCase):
def parse(self, mapping_list, unique_values=True, unique_keys=True):
return utils.parse_mappings(mapping_list, unique_values, unique_keys)
def test_parse_mappings_fails_for_missing_separator(self):
with testtools.ExpectedException(ValueError):
self.parse(['key'])
def test_parse_mappings_fails_for_missing_key(self):
with testtools.ExpectedException(ValueError):
self.parse([':val'])
def test_parse_mappings_fails_for_missing_value(self):
with testtools.ExpectedException(ValueError):
self.parse(['key:'])
def test_parse_mappings_fails_for_extra_separator(self):
with testtools.ExpectedException(ValueError):
self.parse(['key:val:junk'])
def test_parse_mappings_fails_for_duplicate_key(self):
with testtools.ExpectedException(ValueError):
self.parse(['key:val1', 'key:val2'])
def test_parse_mappings_fails_for_duplicate_value(self):
with testtools.ExpectedException(ValueError):
self.parse(['key1:val', 'key2:val'])
def test_parse_mappings_succeeds_for_one_mapping(self):
self.assertEqual({'key': 'val'}, self.parse(['key:val']))
def test_parse_mappings_succeeds_for_n_mappings(self):
self.assertEqual({'key1': 'val1', 'key2': 'val2'},
self.parse(['key1:val1', 'key2:val2']))
def test_parse_mappings_succeeds_for_duplicate_value(self):
self.assertEqual({'key1': 'val', 'key2': 'val'},
self.parse(['key1:val', 'key2:val'], False))
def test_parse_mappings_succeeds_for_no_mappings(self):
self.assertEqual({}, self.parse(['']))
def test_parse_mappings_succeeds_for_nonuniq_key(self):
self.assertEqual({'key': ['val1', 'val2']},
self.parse(['key:val1', 'key:val2', 'key:val2'],
unique_keys=False))
class TestParseTunnelRangesMixin(object):
TUN_MIN = None
TUN_MAX = None
TYPE = None
_err_prefix = "Invalid network tunnel range: '%d:%d' - "
_err_suffix = "%s is not a valid %s identifier."
_err_range = "End of tunnel range is less than start of tunnel range."
def _build_invalid_tunnel_range_msg(self, t_range_tuple, n):
bad_id = t_range_tuple[n - 1]
return (self._err_prefix % t_range_tuple) + (self._err_suffix
% (bad_id, self.TYPE))
def _build_range_reversed_msg(self, t_range_tuple):
return (self._err_prefix % t_range_tuple) + self._err_range
def _verify_range(self, tunnel_range):
return plugin_utils.verify_tunnel_range(tunnel_range, self.TYPE)
def _check_range_valid_ranges(self, tunnel_range):
self.assertIsNone(self._verify_range(tunnel_range))
def _check_range_invalid_ranges(self, bad_range, which):
expected_msg = self._build_invalid_tunnel_range_msg(bad_range, which)
err = self.assertRaises(exc.NetworkTunnelRangeError,
self._verify_range, bad_range)
self.assertEqual(expected_msg, str(err))
def _check_range_reversed(self, bad_range):
err = self.assertRaises(exc.NetworkTunnelRangeError,
self._verify_range, bad_range)
expected_msg = self._build_range_reversed_msg(bad_range)
self.assertEqual(expected_msg, str(err))
def test_range_tunnel_id_valid(self):
self._check_range_valid_ranges((self.TUN_MIN, self.TUN_MAX))
def test_range_tunnel_id_invalid(self):
self._check_range_invalid_ranges((-1, self.TUN_MAX), 1)
self._check_range_invalid_ranges((self.TUN_MIN,
self.TUN_MAX + 1), 2)
self._check_range_invalid_ranges((self.TUN_MIN - 1,
self.TUN_MAX + 1), 1)
def test_range_tunnel_id_reversed(self):
self._check_range_reversed((self.TUN_MAX, self.TUN_MIN))
class TestGreTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
base.BaseTestCase):
TUN_MIN = p_const.MIN_GRE_ID
TUN_MAX = p_const.MAX_GRE_ID
TYPE = p_const.TYPE_GRE
class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
base.BaseTestCase):
TUN_MIN = p_const.MIN_VXLAN_VNI
TUN_MAX = p_const.MAX_VXLAN_VNI
TYPE = p_const.TYPE_VXLAN
class UtilTestParseVlanRanges(base.BaseTestCase):
_err_prefix = "Invalid network VLAN range: '"
_err_bad_count = "' - 'Need exactly two values for VLAN range'."
_err_bad_vlan = "' - '%s is not a valid VLAN tag'."
_err_range = "' - 'End of VLAN range is less than start of VLAN range'."
def _range_err_bad_count(self, nv_range):
return self._err_prefix + nv_range + self._err_bad_count
def _range_invalid_vlan(self, nv_range, n):
vlan = nv_range.split(':')[n]
return self._err_prefix + nv_range + (self._err_bad_vlan % vlan)
def _nrange_invalid_vlan(self, nv_range, n):
vlan = nv_range.split(':')[n]
v_range = ':'.join(nv_range.split(':')[1:])
return self._err_prefix + v_range + (self._err_bad_vlan % vlan)
def _vrange_invalid_vlan(self, v_range_tuple, n):
vlan = v_range_tuple[n - 1]
v_range_str = '%d:%d' % v_range_tuple
return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan)
def _vrange_invalid(self, v_range_tuple):
v_range_str = '%d:%d' % v_range_tuple
return self._err_prefix + v_range_str + self._err_range
class TestVlanNetworkNameValid(base.BaseTestCase):
def parse_vlan_ranges(self, vlan_range):
return plugin_utils.parse_network_vlan_ranges(vlan_range)
def test_validate_provider_phynet_name_mixed(self):
self.assertRaises(n_exc.PhysicalNetworkNameError,
self.parse_vlan_ranges,
['', ':23:30', 'physnet1',
'tenant_net:100:200'])
def test_validate_provider_phynet_name_bad(self):
self.assertRaises(n_exc.PhysicalNetworkNameError,
self.parse_vlan_ranges,
[':1:34'])
class TestVlanRangeVerifyValid(UtilTestParseVlanRanges):
def verify_range(self, vlan_range):
return plugin_utils.verify_vlan_range(vlan_range)
def test_range_valid_ranges(self):
self.assertIsNone(self.verify_range((1, 2)))
self.assertIsNone(self.verify_range((1, 1999)))
self.assertIsNone(self.verify_range((100, 100)))
self.assertIsNone(self.verify_range((100, 200)))
self.assertIsNone(self.verify_range((4001, 4094)))
self.assertIsNone(self.verify_range((1, 4094)))
def check_one_vlan_invalid(self, bad_range, which):
expected_msg = self._vrange_
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/distributions/python/ops/geometric.py
|
Python
|
bsd-2-clause
| 7,545
| 0.003446
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ impor
|
t absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
|
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Geometric(distribution.Distribution):
"""Geometric distribution.
The Geometric distribution is parameterized by p, the probability of a
positive event. It represents the probability that in k + 1 Bernoulli trials,
the first k trials failed, before seeing a success.
The pmf of this distribution is:
#### Mathematical Details
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
"""
def __init__(self,
logits=None,
probs=None,
validate_args=True,
allow_nan_stats=False,
name="Geometric"):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
      validate_args: Python `bool`, default `True`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
      allow_nan_stats: Python `bool`, default `False`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(self._probs)] if validate_args else []):
self._probs = array_ops.identity(self._probs, name="probs")
super(Geometric, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._probs, self._logits],
name=name)
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._probs)
def _batch_shape(self):
return self.probs.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
sampled = random_ops.random_uniform(
array_ops.concat([[n], array_ops.shape(self._probs)], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return math_ops.floor(
math_ops.log(sampled) / math_ops.log1p(-self.probs))
def _cdf(self, counts):
if self.validate_args:
# We set `check_integer=False` since the CDF is defined on whole real
# line.
counts = math_ops.floor(
distribution_util.embed_check_nonnegative_discrete(
counts, check_integer=False))
counts *= array_ops.ones_like(self.probs)
return array_ops.where(
counts < 0.,
array_ops.zeros_like(counts),
-math_ops.expm1(
(counts + 1) * math_ops.log1p(-self.probs)))
def _log_prob(self, counts):
if self.validate_args:
counts = distribution_util.embed_check_nonnegative_discrete(
counts, check_integer=True)
counts *= array_ops.ones_like(self.probs)
probs = self.probs * array_ops.ones_like(counts)
safe_domain = array_ops.where(
math_ops.equal(counts, 0.),
array_ops.zeros_like(probs),
probs)
return counts * math_ops.log1p(-safe_domain) + math_ops.log(probs)
def _entropy(self):
probs = self._probs
if self.validate_args:
probs = control_flow_ops.with_dependencies(
[check_ops.assert_less(
probs,
constant_op.constant(1., probs.dtype),
message="Entropy is undefined when logits = inf or probs = 1.")],
probs)
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s)]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return nn.softplus(self.logits) / probs - self.logits
def _mean(self):
return math_ops.exp(-self.logits)
def _variance(self):
return self._mean() / self.probs
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
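# --- Illustrative sketch (not part of the original TensorFlow file) ---
# A NumPy check of two facts used above, assuming a success probability
# p = 0.3 (the value, the sample size and the tolerances are arbitrary):
# (1) _sample_n draws Geometric variates by the inverse-CDF transform
#     k = floor(log(u) / log1p(-p)), u ~ Uniform(0, 1); the empirical mean
#     should approach (1 - p) / p = exp(-logits), matching _mean().
# (2) _entropy uses entropy(p) = softplus(s) / p - s with s = logit(p),
#     which should agree with the direct formula -[(1-p)log(1-p) + p log p] / p.
import numpy as np

p = 0.3
s = np.log(p / (1. - p))  # logits
u = np.random.RandomState(0).uniform(np.finfo(np.float64).tiny, 1., size=200000)
k = np.floor(np.log(u) / np.log1p(-p))
assert abs(k.mean() - (1. - p) / p) < 0.05

softplus = np.log1p(np.exp(s))
entropy_closed_form = softplus / p - s
entropy_direct = -((1. - p) * np.log(1. - p) + p * np.log(p)) / p
assert abs(entropy_closed_form - entropy_direct) < 1e-12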
|
sposs/DIRAC
|
TransformationSystem/Agent/MCExtensionAgent.py
|
Python
|
gpl-3.0
| 4,963
| 0.032642
|
""" Agent to extend the number of tasks given the Transformation definition
"""
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/MCExtensionAgent'
class MCExtensionAgent( AgentModule ):
def __init__( self, agentName, loadName, baseAgentName, properties = {} ):
''' c'tor
'''
AgentModule.__init__( self, agentName, loa
|
dName, baseAgentName, properties )
self.transClient = TransformationClient()
agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
if agentTSTypes:
self.transformationTypes = sorted( agentTSTypes )
else:
self.transformationTypes = sorted( Operations().getValue(
|
'Transformations/ExtendableTransfTypes',
['MCSimulation', 'Simulation'] ) )
self.maxIterationTasks = self.am_getOption( 'TasksPerIteration', 50 )
self.maxFailRate = self.am_getOption( 'MaxFailureRate', 30 )
self.maxWaitingJobs = self.am_getOption( 'MaxWaitingJobs', 1000 )
#############################################################################
def initialize( self ):
'''Sets defaults
'''
self.am_setOption( 'shifterProxy', 'DataManager' )
gLogger.info( "Will consider the following transformation types: %s" % str( self.transformationTypes ) )
gLogger.info( "Will create a maximum of %s tasks per iteration" % self.maxIterationTasks )
gLogger.info( "Will not submit tasks for transformations with failure rate greater than %s%%" % ( self.maxFailRate ) )
gLogger.info( "Will not submit tasks for transformations with more than %d waiting jobs" % self.maxWaitingJobs )
return S_OK()
#############################################################################
def execute( self ):
''' The MCExtensionAgent execution method.
'''
self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
if not self.enableFlag == 'True':
self.log.info( 'MCExtensionAgent is disabled by configuration option EnableFlag' )
return S_OK( 'Disabled via CS flag' )
    # Get the Active transformations of the considered types and try to extend them
res = self.transClient.getTransformations( {'Status':'Active', 'Type':self.transformationTypes} )
if res['OK']:
for transDict in res['Value']:
transID = transDict['TransformationID']
maxTasks = transDict['MaxNumberOfTasks']
self.extendTransformation( transID, maxTasks )
return S_OK()
def extendTransformation( self, transID, maxTasks ):
gLogger.info( "Considering extension of transformation %d" % transID )
# Get the current count of tasks submitted for this transformation
res = self.transClient.getTransformationTaskStats( transID )
if not res['OK']:
if res['Message'] != 'No records found':
gLogger.error( "Failed to get task statistics", "%s %s" % ( transID, res['Message'] ) )
return res
else:
statusDict = {}
else:
statusDict = res['Value']
gLogger.verbose( "Current task count for transformation %d" % transID )
for status in sorted( statusDict.keys() ):
statusCount = statusDict[status]
gLogger.verbose( "%s : %s" % ( status.ljust( 20 ), str( statusCount ).rjust( 8 ) ) )
# Determine the number of tasks to be created
numberOfTasks = self._calculateTaskNumber( maxTasks, statusDict )
if not numberOfTasks:
gLogger.info( "No tasks required for transformation %d" % transID )
return S_OK()
# Extend the transformation by the determined number of tasks
res = self.transClient.extendTransformation( transID, numberOfTasks )
if not res['OK']:
gLogger.error( "Failed to extend transformation", "%s %s" % ( transID, res['Message'] ) )
return res
gLogger.info( "Successfully extended transformation %d by %d tasks" % ( transID, numberOfTasks ) )
return S_OK()
def _calculateTaskNumber( self, maxTasks, statusDict ):
''' Utility function
'''
done = statusDict.get( 'Done', 0 )
failed = statusDict.get( 'Failed', 0 )
waiting = statusDict.get( 'Waiting', 0 )
total = statusDict.get( 'Created', 0 )
# If the failure rate is higher than acceptable
if ( total != 0 ) and ( ( 100.0 * float( failed ) / float( total ) ) > self.maxFailRate ):
return 0
# If we already have enough completed jobs
if done >= maxTasks:
return 0
if waiting > self.maxWaitingJobs:
return 0
numberOfTasks = maxTasks - ( total - failed )
if numberOfTasks > self.maxIterationTasks:
numberOfTasks = self.maxIterationTasks
return numberOfTasks
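# --- Illustrative sketch (not part of the original agent) ---
# A worked example of the extension arithmetic used by _calculateTaskNumber()
# above, with the default option values from __init__ (TasksPerIteration=50,
# MaxFailureRate=30, MaxWaitingJobs=1000) and a hypothetical task count.
def _exampleTaskNumber( maxTasks, statusDict, maxFailRate = 30, maxWaitingJobs = 1000, maxIterationTasks = 50 ):
  ''' Mirrors the logic of _calculateTaskNumber(), with the options passed explicitly '''
  done = statusDict.get( 'Done', 0 )
  failed = statusDict.get( 'Failed', 0 )
  waiting = statusDict.get( 'Waiting', 0 )
  total = statusDict.get( 'Created', 0 )
  if total and ( 100.0 * float( failed ) / float( total ) ) > maxFailRate:
    return 0
  if done >= maxTasks or waiting > maxWaitingJobs:
    return 0
  return min( maxTasks - ( total - failed ), maxIterationTasks )

# 1000 requested tasks, 400 created so far of which 20 failed and 100 are
# waiting: 1000 - (400 - 20) = 620 tasks are still missing, capped at 50.
assert _exampleTaskNumber( 1000, {'Created': 400, 'Done': 250, 'Failed': 20, 'Waiting': 100} ) == 50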
|
hlmnrmr/superdesk-core
|
superdesk/text_utils.py
|
Python
|
agpl-3.0
| 4,433
| 0.001805
|
# -*- coding: utf-8; -*-
#
# This file is p
|
art of Superdesk.
#
# Copyright 2013, 2017 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import re
from lxml import etree # noqa
from superdesk import etree as sd_etree
from lxml import html as lxml_html
from lxml.html import clean
# This pattern matches http(s) links, numbers (1.000.000 or 1,000,000 or 1 000 000), regular w
|
ords,
# compound words (e.g. "two-done") or abbreviation (e.g. D.C.)
# If you modify please keep in sync with superdesk-client/core/scripts/apps/authoring/authoring/directives/WordCount.js
WORD_PATTERN = re.compile(r'https?:[^ ]*|([0-9]+[,. ]?)+|([\w]\.)+|[\w][\w-]*')
def get_text_word_count(text):
"""Get word count for given plain text.
:param str text: text string
:return int: word count
"""
return sum(1 for word in WORD_PATTERN.finditer(text))
def get_text(markup, content='xml', lf_on_block=False, space_on_elements=False):
"""Get plain text version of (X)HTML or other XML element
if the markup can't be parsed, it will be returned unchanged
:param str markup: string to convert to plain text
:param str content: 'xml' or 'html', as in parse_html
:param bool lf_on_block: if True, add a line feed on block elements' tail
:param bool space_on_elements: if True, add a space on each element's tail
mainly used to count words with non HTML markup
:return str: plain text version of markup
"""
try:
root = sd_etree.parse_html(
markup,
content=content,
lf_on_block=lf_on_block,
space_on_elements=space_on_elements)
text = etree.tostring(root, encoding='unicode', method='text')
return text
except etree.ParseError:
return markup
def get_word_count(markup, no_html=False):
"""Get word count for given html.
:param str markup: xhtml (or other xml) markup
:param bool no_html: set to True if xml param is not (X)HTML
if True, a space will be added after each element to separate words.
        This avoids having constructs like <hl2>word</hl2><p>another</p> (like in NITF)
being counted as one word.
:return int: count of words inside the text
"""
if no_html:
return get_text_word_count(get_text(markup, content='xml', space_on_elements=True))
else:
return get_text_word_count(get_text(markup, content='html', lf_on_block=True))
def update_word_count(update, original=None):
"""Update word count if there was change in content.
:param update: created/updated document
:param original: original document if updated
"""
if update.get('body_html'):
update.setdefault('word_count', get_word_count(update.get('body_html')))
else:
# If the body is removed then set the count to zero
if original and 'word_count' in original and 'body_html' in update:
update['word_count'] = 0
def get_char_count(html):
"""Get character count for given html.
:param html: html string to count
:return int: count of chars inside the text
"""
return len(get_text(html))
def get_reading_time(word_count):
"""Get estimanted number of minutes to read a text
Check https://dev.sourcefabric.org/browse/SDFID-118 for details
:param int word_count: number of words in the text
    :return int: estimated number of minutes to read the text
"""
reading_time_float = word_count / 250
reading_time_minutes = int(reading_time_float)
reading_time_rem_sec = int((reading_time_float - reading_time_minutes) * 60)
if reading_time_rem_sec >= 30:
reading_time_minutes += 1
return reading_time_minutes
def sanitize_html(html):
"""Sanitize HTML
:param str html: unsafe HTML markup
:return str: sanitized HTML
"""
if not html:
return ""
blacklist = ["script", "style", "head"]
root_elem = lxml_html.fromstring(html)
cleaner = clean.Cleaner(
add_nofollow=False,
kill_tags=blacklist
)
cleaned_xhtml = cleaner.clean_html(root_elem)
safe_html = etree.tostring(cleaned_xhtml, encoding="unicode")
# the following code is legacy (pre-lxml)
if safe_html == ", -":
return ""
return safe_html
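# --- Illustrative sketch (not part of the original module) ---
# Minimal checks of the helpers above. get_text_word_count() counts regex
# matches, so "one two three" yields 3. get_reading_time() assumes a rate of
# 250 words per minute and rounds to the nearest minute; assuming Python 3's
# true division, 600 words give 2.4 minutes (2 min 24 s, rounded down to 2)
# and 700 words give 2.8 minutes (2 min 48 s, rounded up to 3).
if __name__ == "__main__":
    assert get_text_word_count("one two three") == 3
    assert get_reading_time(600) == 2
    assert get_reading_time(700) == 3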
|
yzl0083/orange
|
Orange/OrangeWidgets/Visualize/__init__.py
|
Python
|
gpl-3.0
| 266
| 0
|
"""
=========
Visualize
=========
Widgets for data visualization.
"""
# Category description for the widget registry
NAME = "Visualize"
DESCRIPTION = "Widgets for data visualization.
|
"
|
BACKGROUND = "#FFB7B1"
ICON = "icons/Category-Visualize.svg"
PRIORITY = 2
|
googleapis/python-tasks
|
samples/generated_samples/cloudtasks_v2beta2_generated_cloud_tasks_cancel_lease_sync.py
|
Python
|
apache-2.0
| 1,442
| 0.000693
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CancelLease
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-tasks
# [START cloudtasks_v2beta2_generated_CloudTasks_CancelLeas
|
e_sync]
from google.cloud import tasks_v2beta2
def sample_cancel
|
_lease():
# Create a client
client = tasks_v2beta2.CloudTasksClient()
# Initialize request argument(s)
request = tasks_v2beta2.CancelLeaseRequest(
name="name_value",
)
# Make the request
response = client.cancel_lease(request=request)
# Handle the response
print(response)
# [END cloudtasks_v2beta2_generated_CloudTasks_CancelLease_sync]
|
michaelbrooks/uw-message-coding
|
message_coding/apps/coding/migrations/0007_auto_20150619_2106.py
|
Python
|
mit
| 744
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
|
('coding', '0006_auto_20150619_2103'),
]
operations = [
migrations.AlterField(
model_name='code',
name='name',
field=models.CharField(default=b'', max_length=150),
),
migrations.AlterField(
model_name='codegroup',
name='name',
field=models.CharField(default=b'', max_length=150),
)
|
,
migrations.AlterField(
model_name='scheme',
name='name',
field=models.CharField(default=b'', max_length=150),
),
]
|
DavidAwad/HeroquestBot
|
app/__init__.py
|
Python
|
gpl-2.0
| 184
| 0.01087
|
from flask import Flask
from flask import request
|
from flask import render_template
from
|
flask import redirect
from flask import url_for
app = Flask(__name__)
from app import views
|
devananda/ironic
|
ironic/common/network.py
|
Python
|
apache-2.0
| 1,491
| 0
|
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def get_node_vif_ids(task):
"""Get all VIF ids for a node.
This function does not handle multi node operations.
:param task: a TaskManager instance.
:returns: A dict of Node's neutron ports where keys are
'ports' & 'portgroups' and the values are dict of UUIDs
and their associated VIFs, e.g.
::
{'ports': {'port.uuid': vif.id},
'portgroups': {'portgroup.uuid': vif.id}}
"""
vifs = {}
portgroup_vifs = {}
port_vifs = {}
for portgroup in task.portgroups:
vif = portgroup.extra.get('vif_port_id')
if vif:
portgroup_vifs[portgroup.uuid] = vif
vifs['po
|
rtgroups'] = portgroup_vifs
for port in task.ports:
vif = port.extra.get('vif_port_id')
if vif:
port_vifs[port.uuid] = vif
vifs['p
|
orts'] = port_vifs
return vifs
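# --- Illustrative sketch (not part of the original module) ---
# Minimal stand-ins for a TaskManager's ports and portgroups, only to show
# the shape of the dict returned by get_node_vif_ids() above; the stub
# classes and the UUID/VIF strings are assumptions for illustration.
if __name__ == '__main__':
    class _Stub(object):
        def __init__(self, uuid, extra):
            self.uuid = uuid
            self.extra = extra

    class _Task(object):
        ports = [_Stub('port-uuid-1', {'vif_port_id': 'vif-a'})]
        portgroups = [_Stub('portgroup-uuid-1', {'vif_port_id': 'vif-b'})]

    assert get_node_vif_ids(_Task()) == {
        'ports': {'port-uuid-1': 'vif-a'},
        'portgroups': {'portgroup-uuid-1': 'vif-b'}}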
|
OpenPLi/enigma2
|
lib/python/Components/ImportChannels.py
|
Python
|
gpl-2.0
| 6,369
| 0.021667
|
import threading
import urllib2
import os
import shutil
import tempfile
from json import loads
from enigma import eDVBDB, eEPGCache
from Screens.MessageBox import MessageBox
from config import config, ConfigText
from Tools import Notifications
from base64 import encodestring
from urllib import quote
from time import sleep
import xml.etree.ElementTree as et
settingfiles = ('lamedb', 'bouquets.', 'userbouquet.', 'blacklist', 'whitelist', 'alternatives.')
class ImportChannels():
def __init__(self):
if config.usage.remote_fallback_enabled.value and config.usage.remote_fallback_import.value and config.usage.remote_fallback.value and not "ChannelsImport" in [x.name for x in threading.enumerate()]:
self.header = None
if config.usage.remote_fallback_enabled.value and config.usage.remote_fallback_import.value and config.usage.remote_fallback_import_url.value != "same" and config.usage.remote_fallback_import_url.value:
self.url = config.usage.remote_fallback_import_url.value.rsplit(":", 1)[0]
else:
self.url = config.usage.remote_fallback.value.rsplit(":", 1)[0]
if config.usage.remote_fallback_openwebif_customize.value:
self.url = "%s:%s" % (self.url, config.usage.remote_fallback_openwebif_port.value)
if config.usage.remote_fallback_openwebif_userid.value and config.usage.remote_fallback_openwebif_password.value:
self.header = "Basic %s" % encodestring("%s:%s" % (config.usage.remote_fallback_openwebif_userid.value, config.usage.remote_fallback_openwebif_password.value)).strip()
self.remote_fallback_import = config.usage.remote_fallback_import.value
self.thread = threading.Thread(target=self.threaded_function, name="ChannelsImport")
self.thread.start()
def getUrl(self, url, timeout=5):
request = urllib2.Request(url)
if self.header:
request.add_header("Authorization", self.header)
try:
result = urllib2.urlopen(request, timeout=timeout)
except urllib2.URLError, e:
if "[Errno -3]" in str(e.reason):
print "[Import Channels] Network is not up yet, delay 5 seconds"
# network not up yet
sleep(5)
return self.getUrl(url, timeout)
print "[Import Channels] URLError ", e
raise e
return result
def getTerrestrialUrl(self):
url = config.usage.remote_fallback_dvb_t.value
return url[:url.rfind(":")] if url else self.url
def getFallbackSettings(self):
return self.getUrl("%s/web/settings" % self.getTerrestrialUrl()).read()
def getFallbackSettingsValue(self, settings, e2settingname):
root = et.fromstring(settings)
for e2setting in root:
if e2settingname in e2setting[0].text:
return e2setting[1].text
return ""
def getTerrestrialRegion(self, settings):
description = ""
descr = self.getFallbackSettingsValue(settings, ".terrestrial")
if "Europe" in descr:
description = "fallback DVB-T/T2 Europe"
if "Australia" in descr:
description = "fallback DVB-T/T2 Australia"
config.usage.remote_fallback_dvbt_region.value = description
def threaded_function(self):
settings = self.getFallbackSettings()
self.getTerrestrialRegion(settings)
self.tmp_dir = tempfile.mkdtemp(prefix="ImportChannels")
if "epg" in self.remote_fallback_import:
print "[Import Channels] Writing epg.dat file on sever box"
try:
self.getUrl("%s/web/saveepg" % self.url, timeout=30).read()
except:
self.ImportChannels
|
Done(False, _("Error when writing epg.dat on server"))
return
print "[Import Channels] Get EPG Location"
try:
epgdatfile = self.getFallbackSettingsValue(settings, "config.misc.epgcache_filename") or "/hdd/epg.dat"
try:
files = [file for file in loa
|
ds(self.getUrl("%s/file?dir=%s" % (self.url, os.path.dirname(epgdatfile))).read())["files"] if os.path.basename(file).startswith(os.path.basename(epgdatfile))]
except:
files = [file for file in loads(self.getUrl("%s/file?dir=/" % self.url).read())["files"] if os.path.basename(file).startswith("epg.dat")]
epg_location = files[0] if files else None
except:
self.ImportChannelsDone(False, _("Error while retreiving location of epg.dat on server"))
return
if epg_location:
print "[Import Channels] Copy EPG file..."
try:
open(os.path.join(self.tmp_dir, "epg.dat"), "wb").write(self.getUrl("%s/file?file=%s" % (self.url, epg_location)).read())
shutil.move(os.path.join(self.tmp_dir, "epg.dat"), config.misc.epgcache_filename.value)
except:
self.ImportChannelsDone(False, _("Error while retreiving epg.dat from server"))
return
else:
self.ImportChannelsDone(False, _("No epg.dat file found server"))
if "channels" in self.remote_fallback_import:
print "[Import Channels] reading dir"
try:
files = [file for file in loads(self.getUrl("%s/file?dir=/etc/enigma2" % self.url).read())["files"] if os.path.basename(file).startswith(settingfiles)]
for file in files:
file = file.encode("UTF-8")
print "[Import Channels] Downloading %s" % file
try:
open(os.path.join(self.tmp_dir, os.path.basename(file)), "wb").write(self.getUrl("%s/file?file=%s" % (self.url, quote(file))).read())
except Exception, e:
print "[Import Channels] Exception: %s" % str(e)
self.ImportChannelsDone(False, _("ERROR downloading file %s") % file)
return
except:
self.ImportChannelsDone(False, _("Error %s") % self.url)
return
print "[Import Channels] Removing files..."
files = [file for file in os.listdir("/etc/enigma2") if file.startswith(settingfiles)]
for file in files:
os.remove(os.path.join("/etc/enigma2", file))
print "[Import Channels] copying files..."
files = [x for x in os.listdir(self.tmp_dir) if x.startswith(settingfiles)]
for file in files:
shutil.move(os.path.join(self.tmp_dir, file), os.path.join("/etc/enigma2", file))
self.ImportChannelsDone(True, {"channels": _("Channels"), "epg": _("EPG"), "channels_epg": _("Channels and EPG")}[self.remote_fallback_import])
def ImportChannelsDone(self, flag, message=None):
shutil.rmtree(self.tmp_dir, True)
if flag:
Notifications.AddNotificationWithID("ChannelsImportOK", MessageBox, _("%s imported from fallback tuner") % message, type=MessageBox.TYPE_INFO, timeout=5)
else:
Notifications.AddNotificationWithID("ChannelsImportNOK", MessageBox, _("Import from fallback tuner failed, %s") % message, type=MessageBox.TYPE_ERROR, timeout=5)
|
sysadminmatmoz/odoo-clearcorp
|
product_invoice_report/__openerp__.py
|
Python
|
agpl-3.0
| 1,883
| 0.002124
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Product Invoice Report',
'description': """
Product Invoice Report
=======================
This module permits printing the product invoice report.
""",
'version': '1.0',
'author': 'ClearCorp',
'category': 'Sa
|
les Management',
'website': "http://clearcorp.co.cr",
'complexity': 'normal',
'images' : [],
'depends': [
'account',
'sale',
'report',
'report_xls_template'
],
'data': [
'views/report_product_invoice_pdf.xml',
'views/report_product_invoice_xls.xml',
'p
|
roduct_invoice_report_report.xml',
'wizard/wizard.xml',
],
'test' : [],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
'license': 'AGPL-3',
}
|
davidastephens/pyportfolio
|
pyportfolio/__init__.py
|
Python
|
bsd-3-clause
| 131
| 0.007634
|
__version__ = version = '0.0.1'
from pypor
|
tfolio.models import Equity, Option, Future, Account, Trade
|
, Commodity, Index, Currency
|
nevins-b/lemur
|
lemur/tests/test_notifications.py
|
Python
|
apache-2.0
| 3,324
| 0.003309
|
import pytest
from lemur.notifications.views import * # noqa
from .vectors import VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN
def test_notification_input_schema(client, notification_plugin, notification):
from lemur.notifications.schemas import NotificationInputSchema
input_data = {
'label': 'notification1',
'options': {},
'description': 'my notification',
'active': True,
'plugin': {
'slug': 'test-notification'
}
}
data, errors = NotificationInputSchema().load(input_data)
assert not errors
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_notification_get(client, notification_plugin, notification, token, status):
assert client.get(api.url_for(Notifications, notification_id=notification.id), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_notification_post_(client, token, status):
assert client.post(api.url_for(Notifications, notification_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_notification_put(client, token, status):
assert client.put(api.url_for(Notifications, notification_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_notification_delete(client, token, status):
assert client.delete(api.url_for(Notifications, notification_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_notification_patch(client, token, status):
assert client.patch(api.url_for(Notifications, notification_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_notification_list_post_(client, token, status):
assert client.post(api.url_for(Notificatio
|
nsList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_notification_list_get(client, notification_plugin, notification, token, status):
assert client.get(api.url_for(NotificationsList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
|
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_notification_list_delete(client, token, status):
assert client.delete(api.url_for(NotificationsList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_notification_list_patch(client, token, status):
assert client.patch(api.url_for(NotificationsList), data={}, headers=token).status_code == status
|
refnil/CS_Game_Practice
|
test.py
|
Python
|
apache-2.0
| 121
| 0.033058
|
def bl
|
oup(n):
for i in xrange(0,n):
print "%s pikachu lol %d soup
|
soup" % (" "*(i%10), i)
bloup(666)
|
Donkyhotay/MoonPy
|
zope/i18n/locales/__init__.py
|
Python
|
gpl-3.0
| 22,122
| 0.000271
|
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Locale and LocaleProvider Implementation.
$Id: __init__.py 38178 2005-08-30 21:50:19Z mj $
"""
import os
from datetime import datetime, date
from time import strptime
from zope.interface import implements
from zope.i18n.interfaces.locales import ILocale
from zope.i18n.interfaces.locales import ILocaleDisplayNames, ILocaleDates
from zope.i18n.interfaces.locales import ILocaleVersion, ILocaleIdentity
from zope.i18n.interfaces.locales import ILocaleTimeZone, ILocaleCalendar
from zope.i18n.interfaces.locales import ILocaleCurrency, ILocaleNumbers
from zope.i18n.interfaces.locales import ILocaleFormat, ILocaleFormatLength
from zope.i18n.interfaces.locales import ILocaleOrientation
from zope.i18n.format import NumberFormat, DateTimeFormat
from zope.i18n.locales.inheritance import \
AttributeInheritance, InheritingDictionary, NoParentException
from zope.i18n.locales.provider import LocaleProvider, LoadLocaleError
# Setup the locale directory
from zope import i18n
LOCALEDIR = os.path.join(os.path.dirname(i18n.__file__), "locales", "data")
# Global LocaleProvider. We really just need this single one.
locales = LocaleProvider(LOCALEDIR)
# Define some constants that can be used
JANUARY = 1
FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
dayMapping = {'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4,
'fri': 5, 'sat': 6, 'sun': 7}
BC = 1
AD = 2
class LocaleIdentity(object):
"""Represents a unique identification of the locale
This class does not have to deal with inheritance.
Examples::
>>> id = LocaleIdentity('en')
>>> id
<LocaleIdentity (en, None, None, None)>
>>> id = LocaleIdentity('en', 'latin')
>>> id
<LocaleIdentity (en, latin, None, None)>
>>> id = LocaleIdentity('en', 'latin', 'US')
>>> id
<LocaleIdentity (en, latin, US, None)>
>>> id = LocaleIdentity('en', 'latin', 'US', 'POSIX')
>>> id
<LocaleIdentity (en, latin, US, POSIX)>
>>> id = LocaleIdentity('en', None, 'US', 'POSIX')
>>> id
<LocaleIdentity (en, None, US, POSIX)>
"""
implements(ILocaleIdentity)
def __init__(self, language=None, script=None, territory=None, variant=None):
"""Initialize object."""
self.language = language
self.script = script
self.territory = territory
self.variant = variant
def __repr__(self):
"""See zope.i18n.interfaces.ILocaleIdentity
"""
return "<LocaleIdentity (%s, %s, %s, %s)>" %(
self.language, self.script, self.territory, self.variant)
class LocaleVersion(object):
"""Represents a particular version of a locale
This class does not have to deal with inheritance.
Examples::
>>> cmp(LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes'),
... LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes again'))
0
>>> cmp(LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes'),
... LocaleVersion('1.0', datetime(2004, 1, 2), 'no notes again'))
-1
>>> cmp(LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes'),
... LocaleVersion('0.9', datetime(2004, 1, 2), 'no notes again'))
-1
>>> cmp(LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes'),
... LocaleVersion('0.9', datetime(2004, 1, 1), 'no notes again'))
1
"""
implements(ILocaleVersion)
def __init__(self, number, generationDate, notes):
"""Initialize object."""
self.number = number
assert(isinstance(generationDate, (date, type(None))))
self.generationDate = generationDate
self.notes = notes
def __cmp__(self, other):
"See zope.i18n.interfaces.ILocaleVersion"
return cmp((self.generationDate, self.number),
(other.generationDate, other.number))
class LocaleDisplayNames(AttributeInheritance):
"""Locale display names with inheritable data.
Examples::
>>> from zope.i18n.locales.tests.test_docstrings import \\
... LocaleInheritanceStub
>>> root = LocaleInheritanceStub()
>>> root.displayNames = LocaleDisplayNames()
>>> root.displayNames.languages = ['en', 'de']
>>> root.displayNames.keys = ['foo', 'bar']
>>> locale = LocaleInheritanceStub(nextLocale=root)
>>> locale.displayNames = LocaleDisplayNames()
>>> locale.displayNames.keys = ['fu', 'bahr']
Here you can see the inheritance in action
>>> locale.displayNames.languages
['en', 'de']
>>> locale.displayNames.keys
['fu', 'bahr']
"""
implements(ILocaleDisplayNames)
class LocaleTimeZone(object):
"""Specifies one of the timezones of a specific locale.
The attributes of this class are not inherited, since all timezone
information is always provided together.
Example::
>>> tz = LocaleTimeZone('Europe/Berlin')
>>> tz.cities = ['Berlin']
>>> tz.names = {'standard': ('Mitteleuropaeische Zeit', 'MEZ'),
... 'daylight': ('Mitteleuropaeische Sommerzeit', 'MESZ')}
>>> tz.type
'Europe/Berlin'
>>> tz.cities
['Berlin']
"""
implements(ILocaleTimeZone)
def __init__(self, type):
"""Initialize the object."""
self.type = type
self.cities = []
self.names = {}
class LocaleFormat(object):
"""Specifies one of the format of a specific format length.
The attributes of this class are not inherite
|
d, since all format
information is always provided together. Note that this information by
itself is often not useful, since other calendar data is required to use
the specified pattern for formatting and parsing.
"""
implements(ILocaleFormat)
|
def __init__(self, type=None):
"""Initialize the object."""
self.type = type
self.displayName = u''
self.pattern = u''
class LocaleFormatLength(AttributeInheritance):
"""Specifies one of the format lengths of a specific quantity, like
numbers, dates, times and datetimes."""
implements(ILocaleFormatLength)
def __init__(self, type=None):
"""Initialize the object."""
self.type = type
self.default = None
class LocaleCalendar(AttributeInheritance):
"""Represents locale data for a calendar, like 'gregorian'.
    This object is particularly tricky, since the calendar not only inherits
from higher-up locales, but also from the specified gregorian calendar
available for this locale. This was done, since most other calendars have
different year and era data, but everything else remains the same.
Example::
Even though the 'Locale' object has no 'calendar' attribute for real, it
helps us here to make the example simpler.
>>> from zope.i18n.locales.tests.test_docstrings import \\
... LocaleInheritanceStub
>>> root = LocaleInheritanceStub()
>>> root.calendar = LocaleCalendar('gregorian')
>>> locale = LocaleInheritanceStub(nextLocale=root)
>>> locale.calendar = LocaleCalendar('gregorian')
>>> root.calendar.months = InheritingDictionary(
... {1: (u'January', u'Jan'), 2: (u'February', u'Feb')})
>>> locale.calendar.months = InheritingDictionary(
... {2: (u'Februar', u'Feb'), 3: (u'Maerz', u'Mrz')})
>>> locale.calendar.getMonthName
|
ahonkela/pol2rna
|
python/filter_transcript_counts.py
|
Python
|
bsd-3-clause
| 244
| 0.008197
|
# python filter_transcript_co
|
unts.py < transcript_counts.txt > active_transcripts.txt
import sys
print "Gene\tTranscript\tExpression"
for l in sys.stdin:
t = l.strip().split('\t')
if float(t[2]) > 1.1:
|
print '\t'.join(t[0:3])
|
dokipen/trac
|
trac/web/session.py
|
Python
|
bsd-3-clause
| 11,069
| 0.000723
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import time
from trac.core import TracError
from trac.db.util import with_transaction
from trac.util import hex_entropy
from trac.util.html import Markup
UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day
PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle
COOKIE_KEY = 'trac_session'
class DetachedSession(dict):
def __init__(self, env, sid):
dict.__init__(self)
self.env = env
self.sid = None
self.last_visit = 0
self._new = True
self._old = {}
if sid:
self.get_session(sid, authenticated=True)
else:
self.authenticated = False
def __setitem__(self, key, value):
dict.__setitem__(self, key, unicode(value))
def get_session(self, sid, authenticated=False):
self.env.log.debug('Retrieving session for ID %r',
|
sid)
db = sel
|
f.env.get_db_cnx()
cursor = db.cursor()
self.sid = sid
self.authenticated = authenticated
cursor.execute("SELECT last_visit FROM session "
"WHERE sid=%s AND authenticated=%s",
(sid, int(authenticated)))
row = cursor.fetchone()
if not row:
return
self._new = False
self.last_visit = int(row[0] or 0)
cursor.execute("SELECT name,value FROM session_attribute "
"WHERE sid=%s and authenticated=%s",
(sid, int(authenticated)))
for name, value in cursor:
self[name] = value
self._old.update(self)
def save(self):
if not self._old and not self.items():
# The session doesn't have associated data, so there's no need to
# persist it
return
authenticated = int(self.authenticated)
now = int(time.time())
@with_transaction(self.env)
def delete_session_cookie(db):
cursor = db.cursor()
if self._new:
self.last_visit = now
self._new = False
# The session might already exist even if _new is True since
# it could have been created by a concurrent request (#3563).
try:
cursor.execute("INSERT INTO session "
" (sid,last_visit,authenticated)"
" VALUES (%s,%s,%s)",
(self.sid, self.last_visit, authenticated))
except Exception:
db.rollback()
self.env.log.warning('Session %s already exists', self.sid)
if self._old != self:
attrs = [(self.sid, authenticated, k, v)
for k, v in self.items()]
cursor.execute("DELETE FROM session_attribute WHERE sid=%s",
(self.sid,))
self._old = dict(self.items())
if attrs:
# The session variables might already have been updated
# by a concurrent request.
try:
cursor.executemany("INSERT INTO session_attribute "
" (sid,authenticated,name,value) "
" VALUES (%s,%s,%s,%s)", attrs)
except Exception:
db.rollback()
self.env.log.warning('Attributes for session %s '
'already updated', self.sid)
elif not authenticated:
# No need to keep around empty unauthenticated sessions
cursor.execute("DELETE FROM session "
"WHERE sid=%s AND authenticated=0",
(self.sid,))
return
            # Update the session last visit time if it is older than
            # UPDATE_INTERVAL, so that the session doesn't get purged
if now - self.last_visit > UPDATE_INTERVAL:
self.last_visit = now
self.env.log.info("Refreshing session %s", self.sid)
cursor.execute('UPDATE session SET last_visit=%s '
'WHERE sid=%s AND authenticated=%s',
(self.last_visit, self.sid, authenticated))
# Purge expired sessions. We do this only when the session was
# changed as to minimize the purging.
mintime = now - PURGE_AGE
self.env.log.debug('Purging old, expired, sessions.')
cursor.execute("DELETE FROM session_attribute "
"WHERE authenticated=0 AND sid "
"IN (SELECT sid FROM session WHERE "
"authenticated=0 AND last_visit < %s)",
(mintime,))
cursor.execute("DELETE FROM session WHERE "
"authenticated=0 AND last_visit < %s",
(mintime,))
class Session(DetachedSession):
"""Basic session handling and per-session storage."""
def __init__(self, env, req):
super(Session, self).__init__(env, None)
self.req = req
if req.authname == 'anonymous':
if not req.incookie.has_key(COOKIE_KEY):
self.sid = hex_entropy(24)
self.bake_cookie()
else:
sid = req.incookie[COOKIE_KEY].value
self.get_session(sid)
else:
if req.incookie.has_key(COOKIE_KEY):
sid = req.incookie[COOKIE_KEY].value
self.promote_session(sid)
self.get_session(req.authname, authenticated=True)
def bake_cookie(self, expires=PURGE_AGE):
assert self.sid, 'Session ID not set'
self.req.outcookie[COOKIE_KEY] = self.sid
self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/'
self.req.outcookie[COOKIE_KEY]['expires'] = expires
if self.env.secure_cookies:
self.req.outcookie[COOKIE_KEY]['secure'] = True
def get_session(self, sid, authenticated=False):
refresh_cookie = False
if self.sid and sid != self.sid:
refresh_cookie = True
super(Session, self).get_session(sid, authenticated)
if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL:
refresh_cookie = True
        # Refresh the session cookie if this is the first visit in over a day
if not authenticated and refresh_cookie:
self.bake_cookie()
def change_sid(self, new_sid):
assert self.req.authname == 'anonymous', \
'Cannot change ID of authenticated session'
assert new_sid, 'Session ID cannot be empty'
if new_sid == self.sid:
return
cursor = self.env.get_db_cnx().cursor()
cursor.execute("SELECT sid FROM session WHERE sid=%s", (new_sid,))
if cursor.fetchone():
raise TracError(Markup('Session "%s" already exists.<br />'
'Please choose a different session ID.')
% new_sid, 'Error renaming session')
self.env.log.debug('Changing ses
|
valsson/MD-MC-Codes-2016
|
MuellerBrown-MD/MD-MuellerBrown.py
|
Python
|
mit
| 1,665
| 0.013213
|
from MuellerBrown import getPotentialAndForces
from PlotUtils import PlotUtils
import numpy as np
import matplotlib.pyplot as plt
import MuellerBrown as mbpot
m=1.0
def getKineticEnergy(velocity):
return 0.5*m*(velocity[0]**2+velocity[1]**2)
dt = 0.01
num_steps = 1000
#initial_position = np.array( [ 0.0 , 0.0 ] )
initial_position = mbpot.saddlePoints[0]
in
|
itial_velocity = np.array( [ 1.0 , -1.0 ] )
position = np.zeros([num_steps+1,2])
velocity = np.zeros([num_steps+1,2])
potential_energy = np.zeros(num_steps+1)
kinetic_energy = np.zeros(num_steps+1)
total_energy = np.zeros(num_steps+1)
times = np.arange(num_steps+1)*dt
time = 0.0
position[0,:] = initial_position
velocity[0,:] = initial_velocity
kinetic_energy[0] = getKineticEnergy(initial_velocity)
(pot, force) = getPotentialAndForces(initial_position)
potential_energy[0] = pot
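# The loop below implements the velocity Verlet integrator (this comment is
# descriptive only): positions advance as
#   x(t+dt) = x(t) + v(t)*dt + F(t)/(2m)*dt**2
# and velocities use the average of the old and new forces,
#   v(t+dt) = v(t) + (F(t) + F(t+dt))/(2m)*dt
# With this scheme the total energy plotted in figure 5 should remain
# approximately constant along the trajectory.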
f
|
or i in range(0,num_steps):
# get position at t+dt
position[i+1] = position[i] + velocity[i]*dt+0.5*(force/m)*dt**2
# get velocity at t+dt
(new_pot, new_force) = getPotentialAndForces(position[i+1])
velocity[i+1] = velocity[i] + (0.5/m)*(new_force+force)*dt
# add stuff
kinetic_energy[i+1] = getKineticEnergy(velocity[i+1])
potential_energy[i+1] = new_pot
force = new_force
total_energy = potential_energy + kinetic_energy
pu = PlotUtils(mbpot,[200,200])
pu.plotPotential(trajectory=position)
plt.figure(1)
plt.plot(times,position[:,0])
plt.figure(2)
plt.plot(times,position[:,1])
plt.figure(3)
plt.plot(times,potential_energy)
plt.figure(4)
plt.plot(times,kinetic_energy)
plt.figure(5)
plt.ylim(0, np.max(total_energy)+1.0)
plt.plot(times,total_energy)
plt.show()
|
low-sky/simscript
|
postproc/pipeline.py
|
Python
|
gpl-2.0
| 893
| 0.00224
|
import commands
import sys
import postproc_yt as pp
import os
import shutil
targetdir = sys.argv[1]
timestep = float(sys.argv[2])
face = float(sys.argv[3])
level = float(sys.argv[4])
ppdir = os.getenv('PPDIR')
outdir = os.getenv('PPOUTDIR')
D = pp.FileSetup(targetdir, face=face, level=level,
timestep=timestep, ppdir=ppdir)
#pp.ProblemSetup(D['FileName'], face = face, dust_te
|
mp = D['GasTemp'])
os.chdir(D['TempDir'])
command = ppdir + 'radmc3d image npix ' + \
str(int(D['GridSize'])
|
) + \
' iline 1 widthkms 10 linenlam 500 loadlambda fluxcons inclline linelist nostar writepop doppcatch sizepc 10 norefine'
print(command)
result = commands.getoutput(command)
print(result)
save_name = os.path.join(outdir, D['FileName'][17:-5] + '_radmc.fits')
pp.MakeFits(fitsfile=save_name, dpc=260.0, toK=True)
shutil.move(save_name, outdir)
shutil.rmtree(D['TempDir'])
|
anthonyt/mingus-counterpoint
|
googlecode_upload.py
|
Python
|
gpl-3.0
| 9,994
| 0.008505
|
#!/usr/bin/env python
#
# !!!!!!!!! WARNING !!!!!!!!!!!!!!!
# This Script was bastardized To Read Password From /home/bspaans/.googlecode
#
#
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import http.client
import os.path
import optparse
import getpass
import base64
import sys
def get_svn_config_dir():
pass
def get_svn_auth(project_name, config_dir):
"""Return (username, password) for project_name in config_dir.
!!!!! CHANGED !!!!!!!!"""
f = open("/home/bspaans/.googlecode", 'r')
usr_data = f.read().split(":")
f.close()
return (usr_data[0], usr_data[1][:-1])
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of user@gmail.com. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
# b64encode needs bytes on Python 3 (http.client is used below); decode the token back to str for the header
auth_token = base64.b64encode(('%s:%s' % (user_name, password)).encode('utf-8')).decode('ascii')
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = http.client.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
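# --- Editor's sketch, not part of the original script ------------------------
# The Basic-auth header built in upload() above, shown standalone; the
# credentials are made up, and the bytes round-trip is what Python 3's base64
# module expects.
_example_token = base64.b64encode(b'example.user:example-password').decode('ascii')
_example_headers = {'Authorization': 'Basic %s' % _example_token}
assert _example_headers['Authorization'].startswith('Basic ')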
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determin
|
es the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + B
|
OUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
config_dir=None, user_name=None, tries=1):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
If config_dir is None, try get_svn_config_dir(); if it is 'none', skip
trying the Subversion configuration entirely. If user_name is not None, use
it for the first attempt; prompt for subsequent attempts.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
if config_dir != 'none':
# Try to load username/password from svn config for first try.
if config_dir is None:
config_dir = get_svn_config_dir()
(svn_username, password) = get_svn_auth(project_name, config_dir)
if user_name is None:
# If username was not supplied by caller, use svn config.
user_name = svn_username
else:
# Just initialize password for the first try.
password = None
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print('Please enter your googlecode.com password.')
print('** Note that this is NOT your Gmail account password! **')
print('It is the password you use to access Subversion repositories,')
print('and can be found here: http://code.google.com/hosting/settings')
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [http.client.FORBIDDEN]:
# Rest for another try.
tries = tries - 1
el
|
GGiecold/ECLAIR
|
src/ECLAIR/Build_instance/ECLAIR_core.py
|
Python
|
mit
| 58,120
| 0.01139
|
#!/usr/bin/env python
# ECLAIR/src/ECLAIR/Build_instance/ECLAIR_core.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""ECLAIR is a package for the robust and scalable
inference of cell lineages from gene expression data.
ECLAIR achieves a higher level of confidence in the estimated lineages
through the use of approximation algorithms for consensus clustering and by combining the information from an ensemble of minimum spanning trees
so as to come up with an improved, aggregated lineage tree.
In addition, the present package features several customized algorithms for assessing the similarity between weighted graphs or unrooted trees and for estimating the reproducibility of each edge to a given tree.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Conte, D., Foggia, P., Sansone, C. and Vento, M.,
"Thirty Years of Graph Matching in Pattern Recognition".
In: International Journal of Pattern Recognition and Artificial Intelligence,
18, 3, pp. 265-298. 2004
"""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import Concurrent_AP as AP
import Cluster_Ensembles as CE
import DBSCAN_multiplex
import Density_Sampling
from collections import defaultdict, namedtuple
import datetime
import igraph
from math import floor, sqrt
import numpy as np
import os
import psutil
import random
import scipy.sparse
from scipy.spatial.distance import _validate_vector
from sklearn.decomposition im
|
port PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.preprocessing import StandardScaler
import subprocess
from sys import exit
import tables
import time
__all__ = ['tree_path_integrals', 'ECLAIR_processing']
Data_info = namedtuple('Data_info', "data_file_name expected_N
|
_samples "
"skip_rows cell_IDs_column extra_excluded_columns "
"time_info_column")
AP_parameters = namedtuple('AP_parameters', "clustering_method max_iter "
"convergence_iter")
DBSCAN_parameters = namedtuple('DBSCAN_parameters', "clustering_method minPts "
"eps quantile metric")
HIERARCHICAL_parameters = namedtuple('HIERARCHICAL_parameters',
'clustering_method k')
KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k')
CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc')
Holder = namedtuple('Holder', "N_samples subsample_size N_runs name_tag "
"method run error_count")
def memory():
"""Determine memory specifications of the machine.
Returns
-------
mem_info : dictionary
Holds the current values for the total, free and used memory of the system.
"""
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():  # works on Python 2 and 3
mem_info[k] = int(v)
return mem_info
def get_chunk_size(N, n):
"""Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'.
"""
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: ECLAIR: ECLAIR_core: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
exit(1)
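# Editor's note, not part of the original module: the chunk-size rule above,
# evaluated once with made-up numbers (the units are whatever psutil reports
# for free memory on this machine).
_mem_free_example = 16000000
_N_example, _n_example = 50000, 4
_chunk_example = int(((_mem_free_example - 2000000) * 1000) / (4 * _n_example * _N_example))
# -> 17500 rows/columns per chunk under these assumptions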
def KMEANS(data, k):
from sklearn.cluster import k_means, MiniBatchKMeans
if data.shape[0] < 50000:
centroids, cluster_labels, _ = k_means(data, k, init = 'k-means++', precompute_distances = 'auto', n_init = 20, max_iter = 300, n_jobs = 1)
else:
mbkm = MiniBatchKMeans(k, 'k-means++', max_iter = 300, batch_size = data.shape[0] / k, n_init = 20)
mbkm.fit(data)
centroids = mbkm.cluster_centers_
cluster_labels = mbkm.labels_
return centroids, cluster_labels
def hierarchical_clustering(data, n_clusters):
from .Scalable_SLINK import SLINK
from scipy.cluster.hierarchy import fcluster
assert isinstance(n_clusters, int) and n_clusters > 1
linkage_matrix = SLINK(data)
cluster_labels = fcluster(linkage_matrix, n_clusters - 1, 'maxclust')
return cluster_labels
def toHDF5(hdf5_file_name, data_file_name, expected_rows, sampling_fraction,
clustering_parameters, skip_rows, cell_IDs_column,
extra_excluded_columns, time_info_column, scaling = False,
PCA_flag = False, N_PCA = 10):
"""Read the data and store it in HDF5 format.
Also records the cell/sample names.
If applicable, create spaces in this data structure
for various arrays involved
in affinity propagation clustering.
Parameters
----------
hdf5_file_name : string or file object
data_file_name : string or file object
expected_rows : int
sampling_fraction : float
clustering_parameters : namedtuple
skip_rows : int
cell_IDs_column : int
extra_excluded_column : list
scaling : Boolean, optional (default = True)
Returns
-------
data : array (n_samples, n_features)
cell_IDs : array (n_samples,)
hdf5_file_name : file object or string
"""
assert isinstance(expected_rows,int)
assert isinstance(sampling_fraction, float) or isinstance(sampling_fraction, int)
assert isinstance(skip_rows, int)
assert isinstance(cell_IDs_column, int)
cell_IDs, time_info, data = dataProcessor(data_file_name, skip_rows,
cell_IDs_column, extra_excluded_columns, time_info_column)
unexpressed_indices = reportUnexpressed(data)
if unexpressed_indices.size != 0:
data = np.delete(data, unexpressed_indices, axis = 1)
cell_IDs = np.delete(cell_IDs, unexpressed_indices)
if time_info_column > 0:
time_info = np.delete(time_info, unexpressed_indices)
# Done with detecting unexpressed genes or reporters.
method = clustering_parameters.clustering_method
if scaling or method == 'DBSCAN':
data = StandardScaler().fit_transform(data)
if PCA_flag:
if not scaling:
data = StandardScaler().fit_transform(data)
pca = PCA(copy = True)
data = pca.fit_transform(data)[:, :N_PCA]
N_samples = data.shape[0]
|
kenwilcox/PlayingWithIronPython
|
PythonLibs.py
|
Python
|
mit
| 307
| 0.016287
|
im
|
port clr
import xmlutil
clr.AddReference('System.Xml')
from System.Xml import *
d = XmlDocument()
d.Load('C:\Program Files (x86)\IronPython 2.7\Tutorial\load.xml')
n = d.SelectNodes('//Puzzle/SavedGames/Game/@caption')
for e in n:
print e.Value
for e in xmlutil.Walk(d):
prin
|
t e.Name, e.Value
|
tbleiker/StreamBug
|
tests/cmd_interface_02_server_clients.py
|
Python
|
agpl-3.0
| 3,447
| 0
|
#!/usr/bin/env python
# coding: utf-8
#
# StreamBuddy - a video and data streaming service.
# Copyright (c) 2015, Tobias Bleiker & Dumeni Manatschal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or F
|
ITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Source on github:
# https://github.com/tbleiker/StreamBug
#
import multiprocessing
import time
import zmq
from streambug import cmd_interface
from streambug import mplogge
|
r
# set up logging
mplogger.setup(debug=True)
log = mplogger.getLogger()
def server_thread(zeromq_context, address, port_pull, port_pub):
server = cmd_interface.Server(zeromq_context, address, port_pull, port_pub)
server.start()
server.join()
def f1_thread(name, role, zeromq_context, address, port_pub, port_pull):
client = cmd_interface.Client(name, role, zeromq_context, address,
port_pub, port_pull)
def test():
return 'test successful'
client.add_command('test', test, 'simple test')
client.start()
# send an update and join client
time.sleep(0.5)
log.info('### Test 1: Send an update.')
client.send_update('This is an update message.')
client.join()
def c1_thread(name, role, zeromq_context, address, port_pub, port_pull):
client = cmd_interface.Client(name, role, zeromq_context, address,
port_pub, port_pull)
def update_func(msg):
log.info('Got update message: {msg}'.format(msg=msg))
client.set_update_func(update_func)
client.start()
time.sleep(2)
log.info('### Test 2: Get server status.')
client.get_server_status()
time.sleep(0.5)
log.info('### Test 3: Request help.')
client.get_help('F1')
time.sleep(0.5)
log.info('### Test 4: Send command test1.')
ret = client.send_cmd('F1', 'test')
log.info('Got: {ret}'.format(ret=ret))
if __name__ == '__main__':
zeromq_context = zmq.Context()
client_c1 = multiprocessing.Process(name='Client-c1', target=c1_thread,
args=('c1', 'commander',
zeromq_context, '0.0.0.0', 7001,
7000))
client_f1 = multiprocessing.Process(name='Client-f1', target=f1_thread,
args=('f1', 'follower', zeromq_context,
'0.0.0.0', 7001, 7000))
server = multiprocessing.Process(name='Server', target=server_thread,
args=(zeromq_context, '0.0.0.0', 7001,
7000))
server.start()
time.sleep(0.5)
log.info('### Starting clients...')
client_f1.start()
time.sleep(0.1)
client_c1.start()
time.sleep(0.1)
client_c1.join()
client_f1.terminate()
server.terminate()
|
afrachioni/umbrella
|
tools/plot_hist.py
|
Python
|
gpl-3.0
| 556
| 0.034173
|
#!/usr/bin/env python
import numpy
import re, os
import matplotlib.pyplot as plt
import pylab
a = 0.8
plt.rc('axes', color_cycle=[[0,0,a], [0,a,0], [a,0,0]])
files = os.listdir('logs')
lookup = sorted([[int(re.search('\d
|
+', elem).group(0)), elem]
for elem in files], key=lambda x:x[0])
for n, f in lookup:
if not f.endswith('.hist'): continue;
if n % 10 > 0: continue;
x = numpy.loadtxt('logs/' + f, ndmin=2)
if not x.size: conti
|
nue;
#plt.plot(x[:,2], numpy.log10(x[:,1]))
plt.plot(x[:,2], x[:,1])
pylab.savefig('plot.png')
|
koddsson/coredata-python-client
|
docs/conf.py
|
Python
|
mit
| 8,383
| 0.006084
|
# -*- coding: utf-8 -*-
""" Sphinx configuration file. """
#
# Coredata API client documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 6 19:20:17 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
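# Editor's note (not in the generated file): with 'sphinx.ext.todo' enabled
# above, todo entries are only rendered when this flag is turned on.
#todo_include_todos = True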
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coredata API client'
copyright = u'2014, Kristjan Oddsson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.5'
# The full version, including alpha/beta/rc tags.
release = '0.1.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default
|
.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If no
|
t '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CoredataAPIclientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CoredataAPIclient.tex', u'Coredata API client Documentation',
u'Kristjan Oddsson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coredataapiclient', u'Coredata API client Documentation',
[u'Kristjan Oddsson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CoredataAPIclient', u'Coredata API client Documentation',
u'Kristjan Oddsson', 'CoredataAPIclient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices
|
openvswitch/ovn-scale-test
|
rally_ovs/plugins/ovs/ovnclients.py
|
Python
|
apache-2.0
| 5,955
| 0.002015
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from rally.common import logging
from rally.common.utils import
|
RandomNameGeneratorMixin
from rally_ovs.plugins.ovs import ovsclients
from rally_ovs.plugins.ovs import utils
LOG = logging.getLogger(__name__)
class OvnClientMixin(ovsclients.ClientsMixin, RandomNameGeneratorMixin):
def _get_ovn_controller(self, install_method="sandbox"):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", install_method,
self.c
|
ontext['controller']['host_container'])
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl
def _start_daemon(self):
ovn_nbctl = self._get_ovn_controller(self.install_method)
return ovn_nbctl.start_daemon()
def _stop_daemon(self):
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.stop_daemon()
def _restart_daemon(self):
self._stop_daemon()
return self._start_daemon()
def _create_lswitches(self, lswitch_create_args, num_switches=-1):
self.RESOURCE_NAME_FORMAT = "lswitch_XXXXXX_XXXXXX"
if (num_switches == -1):
num_switches = lswitch_create_args.get("amount", 1)
batch = lswitch_create_args.get("batch", num_switches)
start_cidr = lswitch_create_args.get("start_cidr", "")
if start_cidr:
start_cidr = netaddr.IPNetwork(start_cidr)
mcast_snoop = lswitch_create_args.get("mcast_snoop", "true")
mcast_idle = lswitch_create_args.get("mcast_idle_timeout", 300)
mcast_table_size = lswitch_create_args.get("mcast_table_size", 2048)
LOG.info("Create lswitches method: %s" % self.install_method)
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.enable_batch_mode()
flush_count = batch
lswitches = []
for i in range(num_switches):
name = self.generate_random_name()
if start_cidr:
cidr = start_cidr.next(i)
name = "lswitch_%s" % cidr
else:
name = self.generate_random_name()
other_cfg = {
'mcast_snoop': mcast_snoop,
'mcast_idle_timeout': mcast_idle,
'mcast_table_size': mcast_table_size
}
lswitch = ovn_nbctl.lswitch_add(name, other_cfg)
if start_cidr:
lswitch["cidr"] = cidr
LOG.info("create %(name)s %(cidr)s" % \
{"name": name, "cidr": lswitch.get("cidr", "")})
lswitches.append(lswitch)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lswitches
def _create_routers(self, router_create_args):
self.RESOURCE_NAME_FORMAT = "lrouter_XXXXXX_XXXXXX"
amount = router_create_args.get("amount", 1)
batch = router_create_args.get("batch", 1)
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.enable_batch_mode()
flush_count = batch
lrouters = []
for i in range(amount):
name = self.generate_random_name()
lrouter = ovn_nbctl.lrouter_add(name)
lrouters.append(lrouter)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lrouters
def _connect_network_to_router(self, router, network):
LOG.info("Connect network %s to router %s" % (network["name"], router["name"]))
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
base_mac = [i[:2] for i in self.task["uuid"].split('-')]
base_mac[0] = str(hex(int(base_mac[0], 16) & 254))
base_mac[3:] = ['00']*3
mac = utils.get_random_mac(base_mac)
lrouter_port = ovn_nbctl.lrouter_port_add(router["name"], network["name"], mac,
str(network["cidr"]))
ovn_nbctl.flush()
switch_router_port = "rp-" + network["name"]
lport = ovn_nbctl.lswitch_port_add(network["name"], switch_router_port)
ovn_nbctl.db_set('Logical_Switch_Port', switch_router_port,
('options', {"router-port":network["name"]}),
('type', 'router'),
('address', 'router'))
ovn_nbctl.flush()
def _connect_networks_to_routers(self, lnetworks, lrouters, networks_per_router):
for lrouter in lrouters:
LOG.info("Connect %s networks to router %s" % (networks_per_router, lrouter["name"]))
for lnetwork in lnetworks[:networks_per_router]:
LOG.info("connect networks %s cidr %s" % (lnetwork["name"], lnetwork["cidr"]))
self._connect_network_to_router(lrouter, lnetwork)
lnetworks = lnetworks[networks_per_router:]
|
cfelton/gizflo
|
gizflo/toolchain/_toolflow.py
|
Python
|
gpl-3.0
| 3,668
| 0.004362
|
# Copyright (c) 2014 Christopher Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of
|
the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/license
|
s/>.
from __future__ import division
from __future__ import print_function
import os
class _toolflow(object):
def __init__(self, brd, top=None, name=None, path='.'):
"""
Provided a myhdl top-level module and a board definition.
This is the base class for the various FPGA toolchains;
each toolchain requires a specific implementation. This
base class provides the common features and defines the
functions each toolflow needs to implement.
Arguments
top : myhdl top-level module
brd : board definition object (the brd def / _fpga object)
"""
self._path = path
# set the brd def top-level
if top is not None:
brd.set_top(top)
self.brd = brd
# determine a name for this run, should be the brd def
# name, or the top-level name, or user specified name.
# This name should be applied to the file names, project
# name, and the top-level (converted) module name.
# The _fpga object (brd def) will determine whether the board
# name or the top-level name is used
self.name = brd.top_name if name is None else name
self._hdl_file_list = set()
self.logfn = None
@property
def path(self):
return self._path
@path.setter
def path(self, p):
self._path = p
def pathexist(self, pth):
if os.path.isfile(pth):
pth,fn = os.path.split(pth)
fpth = ''
path_split = os.path.split(pth)
for ppth in pth.split(os.path.sep):
fpth = os.path.join(fpth,ppth)
if not os.path.isdir(fpth):
print("path create %s" % (fpth,))
os.mkdir(fpth)
return os.path.isdir(pth)
def set_default_project_file(self, filename=None):
self._default_project_file = filename
def create_project(self, **pattr):
""" Create a project file if needed
"""
pass
def create_flow_script(self):
""" Create the tool-flow script if needed.
"""
pass
def create_constraints(self):
""" Create the constraints
"""
pass
def add_files(self, fn):
""" Add additional files to the tool-flow
"""
if isinstance(fn, str):
fn = {fn}
if isinstance(fn, (list, tuple, set)):
if not all(isinstance(ff, str) for ff in fn):
raise ValueError("Individual filenames must be strings")
else:
raise ValueError("Argument must be a string or a list/tuple/set of strings")
self._hdl_file_list.update(set(fn))
def add_cores(self, fn):
""" Add vendor specific cores
"""
def run(self, use='verilog', name=None):
""" Execute the tool-flow
use : indicated if Verilog or VHDL should be used.
name : user supplied name for project and top-level
"""
pass
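# Editor's sketch, not part of the package: the hooks a concrete toolchain
# would fill in when subclassing _toolflow; the class name and messages are
# made up for illustration.
class _example_toolflow(_toolflow):
    def create_project(self, **pattr):
        print("would write a %s project file under %s" % (self.name, self.path))
    def run(self, use='verilog', name=None):
        self.create_project()
        self.create_flow_script()
        self.create_constraints()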
|
pieterdp/serapeum.backup
|
serapeum/backup/__init__.py
|
Python
|
gpl-3.0
| 4,529
| 0.006845
|
from serapeum.backup.modules.config.arguments import Arguments
from serapeum.backup.modules.config import Config
from serapeum.backup.modules.log import logger
config = Config(Arguments().config_file)
from serapeum.backup.modules.ds.stack import Stack
from serapeum.backup.modules.files import Files
from serapeum.backup.modules.mysql import MySQLBackup
from serapeum.backup.modules.mail import Mail
from serapeum.backup.modules.remotes import Remotes
def job_queue(app_config):
jobs = Stack()
if app_config.config['BACKUP'].get('remote_host'):
jobs.add(Files(
backend=app_config.config['BACKUP'].get('backend'),
host=app_config.config['BACKUP'].get('remote_host')
))
elif app_config.config['BACKUP'].get('remote_host_list'):
for remote in Remotes(app_config.config['BACKUP'].get('remote_host_list')).remotes:
jobs.add(Files(
backend=app_config.config['BACKUP'].get('backend'),
host=remote
))
if app_config.config['MYSQL'].getboolean('backup_mysql') is True:
if app_config.config['MYSQL'].get('remote_loc'):
jobs.add(
MySQLBackup(local_path=app_config.config['MYSQL']['local_path'], server_host=app_config.config['MYSQL']['host'],
server_user=app_config.config['MYSQL']['username'],
server_password=app_config.config['MYSQL']['password'],
backup_destination_path=app_config.config['MYSQL']['backup_p
|
ath'],
backup_remote_host=app_config.config['MYSQL']['remote_loc'],
backup_remote_user=app_config.config['MYSQL']['remote_user'],
backup_ssh=app_config.config['MYSQL']['remote_ssh']))
|
elif app_config.config['MYSQL'].get('remote_list'):
for remote in Remotes(app_config.config['MYSQL'].get('remote_list')).remotes:
if app_config.config['BACKUP']['remote_role'] == 'source':
destination_path = '{0}/{1}'.format(app_config.config['MYSQL']['backup_path'], remote)
jobs.add(
MySQLBackup(local_path=app_config.config['MYSQL']['local_path'],
server_host=app_config.config['MYSQL']['host'],
server_user=app_config.config['MYSQL']['username'],
server_password=app_config.config['MYSQL']['password'],
backup_destination_path=destination_path,
backup_remote_host=remote,
backup_remote_user=app_config.config['MYSQL']['remote_user'],
backup_ssh=app_config.config['MYSQL']['remote_ssh']))
elif app_config.config['BACKUP']['remote_role'] == 'backup':
jobs.add(
MySQLBackup(local_path=app_config.config['MYSQL']['local_path'],
server_host=app_config.config['MYSQL']['host'],
server_user=app_config.config['MYSQL']['username'],
server_password=app_config.config['MYSQL']['password'],
backup_destination_path=app_config.config['MYSQL']['backup_path'],
backup_remote_host=remote,
backup_remote_user=app_config.config['MYSQL']['remote_user'],
backup_ssh=app_config.config['MYSQL']['remote_ssh']))
return jobs
def main():
jobs = job_queue(config)
failures = False
while True:
job = jobs.pop()
if job is None:
break
try:
job.run()
except Exception as e:
m = Mail(server=config.config['MAIL']['smtp_server'], port=config.config['MAIL']['smtp_port'],
username=config.config['MAIL']['smtp_username'], password=config.config['MAIL']['smtp_password'])
m.send(sender=config.config['MAIL']['smtp_username'], recipient=config.config['MAIL']['mail_dest'],
msg_text="{0}\n{1}".format(job.cmd_output, e), subject='The backup job for {0} ({1}) failed.'
.format(job.host, str(job)))
logger.exception(job.cmd_output)
failures = True
if failures is True:
return False
return True
|
shiminasai/plataforma_fadcanic
|
biblioteca/views.py
|
Python
|
mit
| 1,157
| 0.020743
|
from django.shortcuts import render
from .models import Temas, Biblioteca
from django.shortcuts import get_object_or_404
from django.db.models import Q
# Create your views here.
def index(request,template='biblioteca/index.html',slug=None):
temas = Temas.objects.all()
ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:6]
return render(request, template, locals())
def buscar_gu
|
ia(request, template='biblioteca/lis
|
ta_guias.html'):
buscar_palabra = request.GET.get('q')
resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra))
return render(request, template, locals())
def buscar_tema(request, template='biblioteca/lista_guias.html', id=None):
temas = Temas.objects.all()
buscar_palabra = get_object_or_404(Temas,id=id)
resultado = Biblioteca.objects.filter(tema=buscar_palabra)
return render(request, template, locals())
def detalle_guia(request,template='biblioteca/detalle.html',slug=None):
temas = Temas.objects.all()
la_guia = get_object_or_404(Biblioteca, slug=slug)
return render(request, template, locals())
|
tmatth/CloudSound
|
feldmanesque.py
|
Python
|
gpl-3.0
| 6,736
| 0.021229
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Charles Li <chuck@mixed-metaphors.com>
# Copyright (c) 2011 Tristan Matthews <le.businessman@gmail.com>
# This file is part of CloudSound.
# CloudSound is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# CloudSound is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with CloudSound. If not, see <http://www.gnu.org/licenses/>.
import urllib2,re,datetime,calendar,sys,getopt,json
from urllib2 import URLError
from pyo import *
SND_PATH = 'snds/'
URL_TIMEOUT = 100
def main(argv=None):
# citycode = "CAXX0301"
citycode = "CYUL"
f_len = 50
update_interval = 1800
sounds = []
if argv == None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hc:n:u:",["help"])
except getopt.error, msg:
print "Invalid options"
for o, a in opts:
if o in ("-h", "--help"):
print """
-c <weather.com citycode, default CAXX0301 (Montreal)>
-n <length of forecast in hours, from 1 to 180, default 50>
-u <data update interval in seconds, default 1800>
"""
sys.exit(2)
if o == "-c": citycode = a
if o == "-n": f_len = int(a)
if o == "-u": update_interval = int(a)
# start pyo server
s = Server(nchnls=2, buffersize=1024, duplex=0).boot()
s.start()
# initiate sounds
forecast = ScrapeHourly(citycode,f_len)
temp_melody = TempMelody(temp=forecast["temp"],clouds=forecast["clouds
|
"],feelslike=forecast["feelslike"])
# wind_melody = WindMelody(wspd=forecast["wspd"],wdir=forecast["wdir"],pop=forecast["pop"])
|
sounds.append(temp_melody._exc)
sounds.append(temp_melody._exc2)
# sounds.append(wind_melody._exc)
mix = Mix(sounds,2).out()
mixverb = Freeverb(mix,size=0.9,damp=0.95).out()
# reset_sounds(sounds, ambient_sounds)
# update_mixdown(sounds, ambient_sounds)
# update sound data every once in a while
while True:
time.sleep(update_interval)
# forecast = WeatherScrape(citycode)
# ----------- Start of weather scraping functions ---------------
# scrape current weather
hourly_regex = re.compile("hbhTDConditionIcon.*?(\d+)\.gif.*?"
"hbhTDCondition.*?(\d+).*?"
"hbhTDFeels.*?(\d+).*?"
"hbhTDPrecip.*?(\d+).*?"
"hbhTDHumidity.*?(\d+).*?"
"hbhTDWind.*?(Calm|([NSEW]+).*?(\d+))",re.DOTALL)
def ScrapeHourly_old(citycode,hourly_length):
hourly = {"length":hourly_length,"conditions":[],"temp":[],"feels":[],"pop":[],"humidity":[],"wind":[],"wind_dir":[]}
try:
url = urllib2.urlopen("http://www.weather.com/weather/hourbyhour/"+
citycode, timeout=URL_TIMEOUT).read()
except URLError:
print "Weather server timed out"
hourly_all = hourly_regex.findall(url)
print hourly_all
def ScrapeHourly(citycode,f_len):
try:
url = urllib2.urlopen('http://api.wunderground.com/api/59660bdc41616057/hourly7day/q/'+citycode+'.json')
except URLError:
print "Weather server timed out"
temp = []
dewpoint = []
clouds = []
humidity = []
wspd = []
wdir = []
pop = []
feelslike = []
forecast = json.loads(url.read())['hourly_forecast']
for i in range(0,len(forecast)):
temp.append(int(forecast[i]["temp"]["english"]) * 6)
temp.append(int(forecast[i]["dewpoint"]["english"]) * 6)
# dewpoint.append(int(forecast[i]["dewpoint"]["english"]) * 7)
clouds.append(int(forecast[i]["sky"])/10.0)
humidity.append(int(forecast[i]["humidity"])/10.0)
wspd.append(int(forecast[i]["wspd"]["metric"]))
wdir.append(int(forecast[i]["wdir"]["degrees"]))
pop.append(int(forecast[i]["pop"])/10.0)
feelslike.append(int(forecast[i]["feelslike"]["english"]) * 6)
feelslike.append(int(forecast[i]["humidity"]) * 6)
return {"temp":temp[:f_len],"clouds":clouds[:f_len],"humidity":humidity[:f_len],"wspd":wspd,"wdir":wdir,"pop":pop,"feelslike":feelslike[:f_len]}
class TempMelody(object):
def __init__(self,temp,clouds,feelslike,time=0.3,dur=3,mul=.2):
self._temp = temp
self._clouds = clouds
self._feelslike = feelslike
self._time = time
self._dur = dur
self._mul = mul
self._env = CosTable([(0,0),(140,1),(1370,0.45),(3600,0.23),(8191,0)])
# self._env2 = ChebyTable([0.8,0.5,0.9,0.2,0.3,0.2,0.1,0.1,0.1,0.1,0.1,0.1])
self._env2 = ExpTable([(0,0),(4096,1),(8192,0)], exp=5, inverse=True)
# self._env2 = HannTable()
self._seq = Seq(time=self._time,seq=self._clouds,poly=len(clouds)).play()
self._amp = TrigEnv(self._seq,table=self._env,dur=self._dur,mul=self._mul)
self._amp2 = TrigEnv(self._seq,table=self._env2,dur=self._dur,mul=self._mul)
self._exc = SineLoop(freq=self._temp,feedback=0.05,mul=self._amp)
self._exc2 = SineLoop(freq=self._feelslike,feedback=0.05,mul=self._amp2)
@property
def temp(self): return self._temp
@temp.setter
def temp(self,x): self._temp = x
@property
def clouds(self): return self._clouds
@clouds.setter
def clouds(self,x):self._clouds = x
@property
def time(self): return self._time
@time.setter
def time(self,x):self._time = x
@property
def dur(self): return self._dur
@dur.setter
def dur(self,x):self._dur = x
@property
def mul(self): return self._mul
@mul.setter
def mul(self,x):self._mul = x
@property
def env(self): return self._env
@env.setter
def env(self,x):self._env = x
class WindMelody(object):
def __init__(self,wspd,wdir,pop,time=.3,dur=2,mul=.2):
self._wspd = wspd
self._wdir = wdir
self._pop = pop
self._time = time
self._dur = dur
self._mul = mul
self._env = ParaTable()
self._env_old = ChebyTable([0.8,0.5,0.9,0.9])
self._seq = Seq(time=self._time,seq=self._pop,poly=len(self._pop)).play()
self._amp = TrigEnv(self._seq,table=self._env,dur=self._dur,mul=self._mul)
self._exc = SineLoop(freq=self._wdir,feedback=0.05,mul=self._amp)
if __name__ == "__main__":
sys.exit(main())
|
bigmlcom/python
|
bigml/tests/test_44_compare_predictions.py
|
Python
|
apache-2.0
| 22,664
| 0.001456
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Comparing remote and local predictions
"""
import sys
from .world import world, setup_module, teardown_module, show_doc, show_method
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_association_steps as association_create
from . import create_cluster_steps as cluster_create
from . import create_anomaly_steps as anomaly_create
from . import create_prediction_steps as prediction_create
from . import compare_predictions_steps as prediction_compare
class TestComparePrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1910-05-08T19:10:23.106","cat-0":"cat2","target-2":0.4}',
0.52477],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1920-06-30T20:21:20.320","cat-0":"cat1","target-2":0.2}',
0.50654]]
show_doc(self.test_scenario1, examples)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self)
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1b(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1932-01-30T19:24:11.440","cat-0":"cat2","target-2":0.1}',
0.54343],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1950-11-06T05:34:05.602","cat-0":"cat1" ,"target-2":0.9}',
0.5202]]
show_doc(self.test_scenario1b)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self, shared=example["data"])
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1b_a(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1969-7-14 17:36","cat-0":"cat2","target-2":0.9}',
0.93639]]
show_doc(self.test_scenario1b_a)
f
|
or example in examples:
example = dict(zip(headers, example))
|
show_method(self, sys._getframe().f_code.co_name, example)
|
bensk/CS9
|
_site/Code Examples/March21DoNow.py
|
Python
|
mit
| 361
| 0.01385
|
import random
random.randint(0, 3)
random.randint(0, 3)
print(random.randint(0, 3))
print(random.randint(0,
|
3))
print(random.randint(0, 3))
# What does randint do?
# What do the values 0 and 3 do? Try changing those numbers, rerun the program, and write down what changed.
# What is the difference between random.randi
|
nt(0,3) and print(random.randint(0,3))?
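# Editor's sketch (not part of the exercise): randint(0, 3) returns an int from
# 0 through 3 inclusive; calling it on its own discards the value, while
# print(...) displays it.
value = random.randint(0, 3)
assert 0 <= value <= 3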
|
vulcansteel/autorest
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/auto_rest_http_infrastructure_test_service/operations/http_server_failure.py
|
Python
|
mit
| 6,474
| 0.000618
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpServerFailure(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head501(
self, custom_headers={}, raw=False, **operation_config):
"""
Return 501 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: Error or (Error, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/http/failure/server/501'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get501(
self, custom_headers={}, raw=False, **operation_config):
"""
Return 501 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: Error or (Error, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/http/failure/server/501'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post505(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 505 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: Error or (Error, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/http/failure/server/505'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete505(
self, boolean_value=None, custom_headers={}, raw=False, **operation_config):
"""
Return 505 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the dire
|
ct response alongside the
deserialized response
:rtype: Error or (Error, requests.response) or
concurrent.futures.Future
"""
# Construct URL
url = '/http/failure/server/505'
# Construct parameters
query_para
|
meters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
quenette/COMPASS-I
|
t/scripts/test_pycparser.py
|
Python
|
apache-2.0
| 289
| 0.031142
|
import pycparser
def main_eg():
parser = p
|
ycparser.CParser()
buf = '''
int main( int argc, char** argv ) {
j = p && r || q;
return j;
}
'''
t = parser.parse( buf, 'x.c' )
return t
if __name__ == "__main__":
t = main_eg()
t.s
|
how()
|
tehasdf/AdventOfCode2016
|
p6.py
|
Python
|
mit
| 175
| 0.017143
|
import sys
from collections
|
impor
|
t Counter
rows = zip(*[l.strip() for l in sys.stdin])
print ''.join(Counter(l).most_common()[-1][0] for l in rows) # 0 instead of -1 for part1
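# Editor's note (not in the original solution): most_common() orders letters
# from most to least frequent, so index -1 above selects the rarest letter in
# each column; for example:
example_counts = Counter("aab").most_common() # [('a', 2), ('b', 1)]
assert example_counts[-1][0] == 'b'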
|
kevin-coder/tensorflow-fork
|
tensorflow/python/saved_model/simple_save.py
|
Python
|
apache-2.0
| 4,171
| 0.001199
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel simple save functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['saved_model.simple_save'])
@deprecation.deprecated(
None,
'This function will only be available through the v1 compatibility '
'library as tf.compat.v1.saved_model.simple_save.')
def simple_save(session, export_dir, inputs, outputs, legacy_init_op=None):
"""Convenience function to build a SavedModel suitable for serving.
In many common cases, saving models for serving will be as simple as:
      simple_save(session,
export_dir,
inputs={"x": x, "y": y},
outputs={"z": z})
  Although in many cases it's not necessary to understand all of the many ways
to configure a SavedModel, this method has a few practical implications:
- It will be treated as a graph for inference / serving (i.e. uses the tag
`tag_constants.SERVING`)
- The SavedModel will load in TensorFlow Serving and supports the
[Predict
API](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto).
To use the Classify, Regress, or MultiInference APIs, please
use either
[tf.Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
or the lower level
[SavedModel
APIs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
- Some TensorFlow ops depend on information on disk or other information
called "assets". These are generally handled automatically by adding the
assets to the `GraphKeys.ASSET_FILEPATHS` collection. Only assets in that
collection are exported; if you need more custom behavior, you'll need to
use the
[SavedModelBuilder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py).
More information about SavedModel and signatures can be found here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md.
Args:
session: The TensorFlow session from which to save the meta graph and
variables.
export_dir: The path to which the SavedModel will be stored.
inputs: dict mapping string input names to tensors. These are added
to the SignatureDef as the inputs.
outputs: dict mapping string output names to tensors. These are added
to the SignatureDef as the outputs.
legacy_init_op: Legacy support for op or group of ops to execute after the
restore op upon a load.
"""
signature_def_map = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def_utils.predict_signature_def(inputs, outputs)
}
b = builder.SavedModelBuilder(export_dir)
b.add_meta_graph_and_variables(
session,
tags=[tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
main_op=legacy_init_op,
clear_devices=True)
b.save()
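# Minimal usage sketch (illustrative only; TF1-style graph mode is assumed and
# the tensor names and export path below are made up for the example):
#
#     import tensorflow.compat.v1 as tf
#     tf.disable_eager_execution()
#     x = tf.placeholder(tf.float32, shape=[None, 1], name="x")
#     z = tf.identity(2.0 * x, name="z")
#     with tf.Session() as sess:
#         tf.saved_model.simple_save(
#             sess, "/tmp/simple_save_example",
#             inputs={"x": x}, outputs={"z": z})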
pseudo-cluster/pseudo-cluster | scripts/run_pseudo_tasks_slurm.py | Python | lgpl-2.1 | 7,019 | 0.011711
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import datetime
import time
import gettext
from pseudo_cluster.task import Task_record
from pseudo_cluster.tasks_list import Tasks_list
from pseudo_cluster.extended_task import Extended_task_record
from pseudo_cluster.actions_list import Action_list
def get_submit_string(self, time_limit, duration):
    """
    Builds the argument list used to submit
    a task to the Slurm queue.
    """
s=list()
s.append("sbatch")
#
# Uncomment for debug slurm
#
#s.append("-vv")
s.append("--account=%s" % self.task_class)
s.append("--comment=\"Pseudo cluster emulating task\"")
s.append("--job-name=\"pseudo_cluster|%s|%s\"" % (self.job_id, self.job_name))
try:
limit=self.other["memory_limit"]
except KeyError:
limit="0"
if int(limit) > 0:
s.append("--mem=%d" % int(limit))
s.append("--ntasks=%d" % self.required_cpus)
s.append("--partition=%s" % self.partition)
if self.priority !=0:
s.append("--priority=%d" % self.priority)
if time_limit > 0:
s.append("--time=%d" % time_limit)
#
# Path to this script must be available
# from environment variable PATH
#
s.append(self.path_to_task)
s.append("-t")
s.append(str(duration))
s.append("-s")
s.append(self.task_state)
return s
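# For illustration (all values assumed): for a task with task_class="research",
# job_id="42", job_name="demo", required_cpus=8, partition="work", priority=0,
# no memory_limit, time_limit=30 and duration=10, the list built above maps to
# a command line roughly like:
#
#   sbatch --account=research --comment="Pseudo cluster emulating task" \
#          --job-name="pseudo_cluster|42|demo" --ntasks=8 --partition=work \
#          --time=30 /usr/local/bin/pseudo_cluster_task.sh -t 10 -s <task_state>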
def get_cancel_string(self):
return [ "scancel" , str(self.actual_task_id) ]
def parse_task_id(self, f, first_line):
    """
    Extracts the task ID from the file and the first line
    that has already been read from it.
    Does not close the file.
    """
try:
tup=first_line.split(' ')
except:
return False
if (tup[0] == "Submitted") and (tup[1] == "batch"):
self.actual_task_id=int(tup[3])
return True
return False
def main(argv=None):
    """
    Program entry point.
    """
if argv == None:
argv=sys.argv
gettext.install('pseudo-cluster')
parser= argparse.ArgumentParser(
        description=_("""
    This program submits tasks to the Slurm queue.
    The task list is read from a statistics file. Each task is queued
    under the user and group IDs recorded in the statistics. All time
    used by the tasks is compressed by the given factor, and instead of
    the real program each task runs a script that simply does nothing
    for a certain number of seconds.
    """),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=_("For example, you can run it like this:\n ")+argv[0]+" --time-compress 30"
)
parser.add_argument(
'--time-compress',
dest='compress_times',
type=int,
required=True,
        help=_("Factor by which to compress time. For example: 10")
)
parser.add_argument(
'--time-interval',
dest='interval',
type=int,
required=False,
default=2,
        help=_("How often, in minutes, to poll the queueing system")
)
parser.add_argument(
'--prefix',
dest='prefix',
required=False,
default="./",
        help=_("prefix under which the statistics file is located")
)
parser.add_argument(
'--path-to-task-script',
dest='path_to_task',
required=False,
default="/usr/local/bin/pseudo_cluster_task.sh",
        help=_("""
                Path to the script that implements the task body
                in the pseudo cluster.
               """)
)
args=parser.parse_args()
if os.geteuid() != 0:
        print _("""
                This program requires root privileges.
                Run it as the root user, or via the sudo command.
                """)
return 2
#
    # Register the methods that will be invoked on
    # Extended_task_record objects
#
Extended_task_record.get_submit_string=get_submit_string
Extended_task_record.get_cancel_string=get_cancel_string
Extended_task_record.parse_task_id=parse_task_id
tasks_list=Tasks_list()
tasks_list.read_statistics_from_file(args.prefix)
extended_tasks=dict()
num_tasks=len(tasks_list)
begin_time=tasks_list[0].time_submit
last_task=0;
actions_list=Action_list()
while last_task != num_tasks-1:
end_time=begin_time+datetime.timedelta(minutes=args.interval*args.compress_times)
begin_actions_time=datetime.datetime.utcnow()
for i in xrange(0,num_tasks):
if i < last_task:
continue
task=tasks_list[i]
if task.time_submit < begin_time:
last_task=i
if task.time_submit < end_time:
if task.job_id not in extended_tasks:
extended_task=Extended_task_record()
extended_task.fill_by_task(task,args.path_to_task)
actions_list.register_action(extended_task,"submit")
extended_tasks[task.job_id]=extended_task
if (task.time_end < end_time) and (task.task_state == "canceled"):
                actions_list.register_action(extended_tasks[task.job_id], "cancel")
actions_list.do_actions(args.compress_times)
print begin_time
print end_time
print "last_task=%d, num_tasks=%d" % (last_task,num_tasks)
delay_value = datetime.datetime.utcnow()- begin_actions_time
if delay_value < datetime.timedelta(minutes=args.interval):
how_much_sleep=args.interval*60-delay_value.total_seconds()
print (_("will sleep %d") % how_much_sleep)
time.sleep(how_much_sleep)
begin_time=end_time
if __name__ == "__main__":
sys.exit(main())
teodoc/home-assistant | homeassistant/components/sensor/efergy.py | Python | mit | 4,293 | 0
"""
homeassistant.components.sensor.efergy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monitors home energy use as measured by an efergy
engage hub using its (unofficial, undocumented) API.
Configuration:
To use the efergy sensor you will need to add something
like the following to your config/configuration.yaml
sensor:
platform: efergy
app_token: APP_TOKEN
utc_offset: UTC_OFFSET
monitored_variables:
- type: instant_readings
- type: budget
- type: cost
period: day
currency: $
Variables:
app_token
*Required
To get a new App Token, log in to your efergy account, go
to the Settings page, click on App tokens, and click "Add token".
utc_offset
*Required for some variables
Some variables (currently only the daily_cost) require that the
negative number of minutes your timezone is ahead/behind UTC time.
monitored_variables
*Required
An array specifying the variables to monitor.
period
*Optional
Some variables take a period argument. Valid options are "day",
    "week", "month", and "year"
currency
*Optional
This is used to display the cost/period as the unit when monitoring the
cost. It should correspond to the actual currency used in your dashboard.
"""
import logging
from requests import get
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://engage.efergy.com/mobile_proxy/'
SENSOR_TYPES = {
'instant_readings': ['Energy Usage', 'kW'],
'budget': ['Energy Budget', ''],
'cost': ['Energy Cost', ''],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the efergy sensor. """
app_token = config.get("app_token")
if not app_token:
_LOGGER.error(
"Configuration Error"
"Please make sure you have configured your app token")
return None
utc_offset = str(config.get("utc_offset"))
dev = []
for variable in config['monitored_variables']:
if 'period' not in variable:
variable['period'] = ''
if 'currency' not in variable:
variable['currency'] = ''
if variable['type'] not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(EfergySensor(variable['type'], app_token, utc_offset,
variable['period'], variable['currency']))
add_devices(dev)
# pylint: disable=too-many-instance-attributes
class EfergySensor(Entity):
""" Implements an Efergy sensor. """
# pylint: disable=too-many-arguments
def __init__(self, sensor_type, app_token, utc_offset, period, currency):
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self.app_token = app_token
self.utc_offset = utc_offset
self._state = None
self.period = period
self.currency = currency
if self.type == 'cost':
self._unit_of_measurement = self.currency + '/' + self.period
else:
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
""" Returns the name. """
return self._name
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def update(self):
""" Gets the efergy monitor data from the web service """
if self.type == 'instant_readings':
url_string = _RESOURCE + 'getInstant?token=' + self.app_token
response = get(url_string)
self._state = response.json()['reading'] / 1000
elif self.type == 'budget':
url_string = _RESOURCE + 'getBudget?token=' + self.app_token
response = get(url_string)
self._state = response.json()['status']
elif self.type == 'cost':
url_string = _RESOURCE + 'getCost?token=' + self.app_token \
+ '&offset=' + self.utc_offset + '&period=' \
+ self.period
response = get(url_string)
self._state = response.json()['sum']
else:
self._state = 'Unknown'
hodgesds/streamparse | streamparse/cli/quickstart.py | Python | apache-2.0 | 643 | 0
"""
Create new streamparse project template.
"""
from __future__ import absolute_import
from streamparse.bootstrap import quickstart
def subparser_hook(subparsers):
""" Hook to add subparser for this command. """
subparser = subparsers.add_parser('quickstart',
description=__doc__,
help=main.__doc__)
subparser.set_defaults(func=main)
subparser.add_argument('project_name',
help='Name of new streamparse project.')
def main(args):
""" Create new streamparse project template. """
    quickstart(args.project_name)
JCHappytime/MyQuantopian | strategies/strategy.py | Python | gpl-2.0 | 3,094 | 0.004848
#!/usr/bin/env python
LICENSE="""
Copyright (C) 2011 Michael Ihde
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from utils.date import ONE_DAY
import tables
class Strategy(object):
def __init__(self, start_date, end_date, initial_position, market, params, h5file=None):
self.start_date = start_date
self.end_date = end_date
self.initial_position = initial_position
self.market = market
self.params = params
# Manage indicators, the dictionary is:
# key = symbol
# value = dictionary(key="indicator name", value=indicator)
self.indicators = {}
# If the strategy was passed h5 info, use it to store information
self.h5file = h5file
if h5file != None:
self.indicator_h5group = h5file.createGroup("/", "Indicators")
self.strategy_h5group = h5file.createGroup("/", "Strategy")
def addIndicator(self, symbol, name, indicator):
if not self.indicators.has_key(symbol):
            self.indicators[symbol] = {}
self.indicators[symbol][name] = indicator
if self.h5file != None:
try:
symgroup = self.h5file.getNode(self.indicator_h5group._v_pathname, symbol, classname="Group")
except tables.NoSuchNodeError:
symgroup = self.h5file.createGroup(self.indicator_h5group._v_pathname, symbol)
if self.h5file and self.indicator_h5group:
indicator.setupH5(self.h5file, symgroup, name)
def removeIndicator(self, symbol, name):
del self.indicators[symbol][name]
def updateIndicators(self, start_date, end_date=None):
for symbol, indicators in self.indicators.items():
ticker = self.market[symbol]
if end_date != None:
quotes = ticker[start_date:end_date] # Call this to cache everything
end = end_date
else:
end = start_date + ONE_DAY
d = start_date
while d < end:
quote = ticker[d]
if quote.adjclose != None:
for indicator in indicators.values():
indicator.update(quote.adjclose, d)
d += ONE_DAY
def evaluate(self, date, position):
raise NotImplementedError
def finalize(self):
self.h5file = None
self.indicator_h5group = None
self.strategy_h5group = None
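# Illustrative subclass sketch (not part of the original file): concrete
# strategies are expected to override evaluate(); the class name below is
# made up for the example.
#
#     class BuyAndHold(Strategy):
#         def evaluate(self, date, position):
#             # trivial strategy: never trade, keep the current position
#             return position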
penglee87/flaskweb | config.py | Python | mit | 1,488 | 0.004704
import os
import pymysql
pymysql.install_as_MySQLdb()
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'smtp.163.com'
MAIL_PORT = 25
MAIL_USE_TLS = True
MAIL_USERNAME = 'jqblee@163.com'
MAIL_PASSWORD = '******'
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'jqblee@163.com'
FLASKY_ADMIN = 'penglee87@163.com'
FLASKY_POSTS_PER_PAGE = 20
    FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
DEBUG = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or 'mysql://flasky:flasky@127.0.0.1/flasky'
#'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
#'mysql+pymysql://flask:flask@127.0.0.1/flask'
#'mysql://flasky:flasky@127.0.0.1/flasky'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
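# Minimal factory sketch (assumed usage, not part of the files shown here):
# the config mapping above is typically consumed like this.
def _create_app_sketch(config_name='default'):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    return app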
pbecotte/devblog | backend/blog/utils.py | Python | mit | 2,165 | 0.000462
from flask import request, abort, jsonify, render_template
from flask.ext.sqlalchemy import BaseQuery
import math
class PaginatedQuery(object):
def __init__(self, query_or_model, paginate_by, page_var='page',
check_bounds=False):
self.paginate_by = paginate_by
self.page_var = page_var
self.check_bounds = check_bounds
        if isinstance(query_or_model, BaseQuery):
self.query = query_or_model
else:
self.model = query_or_model
self.query = self.model.all()
def get_page(self):
curr_page = request.args.get(self.page_var)
if curr_page and curr_page.isdigit():
return max(1, int(curr_page))
return 1
def get_page_count(self):
        return int(math.ceil(float(self.query.count()) / self.paginate_by))
def get_object_list(self):
if self.get_page_count() == 0:
return []
if self.check_bounds and self.get_page() > self.get_page_count():
abort(404)
return self.query.paginate(self.get_page(), self.paginate_by).items
def object_list(template_name, query, context_variable='object_list',
paginate_by=20, page_var='page', check_bounds=True, **kwargs):
paginated_query = PaginatedQuery(
query,
paginate_by,
page_var,
check_bounds)
kwargs[context_variable] = paginated_query.get_object_list()
return render_template(
template_name,
pagination=paginated_query,
page=paginated_query.get_page(),
**kwargs)
def json_object_list(query, context_variable='object_list',
paginate_by=20, page_var='page', check_bounds=True, **kwargs):
paginated_query = PaginatedQuery(
query,
paginate_by,
page_var,
check_bounds)
kwargs[context_variable] = paginated_query.get_object_list()
return jsonify(
pagination=paginated_query,
page=paginated_query.get_page(),
**kwargs)
def get_object_or_404(query, criteria):
q = query.filter(criteria)
if q.first():
return q.first()
else:
abort(404)
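# Usage sketch (illustrative; `app` and `Post` are assumed names, not defined
# in this module):
#
#     @app.route('/posts/')
#     def post_list():
#         return object_list('post_list.html', Post.query, paginate_by=10)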
glennyonemitsu/MarkupHiveServer | src/model/admin.py | Python | mit | 506 | 0
import base64
import hashlib
import json
import os
import bcrypt
from sqlalchemy.orm.exc import NoResultFound
from server_global import db
class Admin(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), nullable=False, unique=True)
password = db.Column(db.String(60), nullable=False)
def valid_password(self, password):
hashed_pw = bcrypt.hashpw(password, self.password)
match = hashed_pw == self.password
        return match
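    # Illustrative sketch (assumed usage, not part of the original model): the
    # stored password is expected to be a bcrypt hash so valid_password() above
    # can compare against it, e.g.
    #
    #     admin = Admin(username='root',
    #                   password=bcrypt.hashpw('s3cret', bcrypt.gensalt()))
    #     db.session.add(admin)
    #     db.session.commit()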
tsnoam/Flexget | flexget/api/authentication.py | Python | mit | 3,365 | 0.00208
import base64
from flask import request, jsonify, session as flask_session
from flask.ext.login import login_user, LoginManager, current_user, current_app
from flexget.api import api, APIResource, app
from flexget.webserver import User
from flexget.utils.database import with_session
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.request_loader
@with_session
def load_user_from_request(request, session=None):
auth_value = request.headers.get('Authorization')
if not auth_value:
return
# Login using api key
if auth_value.startswith('Token'):
try:
token = auth_value.replace('Token ', '', 1)
            return session.query(User).filter(User.token == token).first()
except (TypeError, ValueError):
pass
# Login using basic auth
if auth_value.startswith('Basic'):
try:
credentials = base64.b64decode(auth_value.replace('Basic ', '', 1))
username, password = credentials.split(':')
return session.query(User).filter(User.name == username, User.password == password).first()
except (TypeError, ValueError):
pass
@login_manager.user_loader
@with_session
def load_user(username, session=None):
return session.query(User).filter(User.name == username).first()
@app.before_request
def check_valid_login():
# Allow access to root, login and swagger documentation without authentication
if request.path == '/' or request.path.startswith('/login') or \
request.path.startswith('/logout') or request.path.startswith('/swagger'):
return
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
# API Authentication and Authorization
login_api = api.namespace('login', description='API Authentication')
login_api_schema = api.schema('login', {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'}
}
})
login_parser = api.parser()
login_parser.add_argument('remember', type=bool, required=False, default=False, help='Remember for next time')
@login_api.route('/')
@api.doc(description='Login to API with username and password')
class LoginAPI(APIResource):
@api.expect(login_api_schema)
@api.response(400, 'Invalid username or password')
@api.response(200, 'Login successful')
@api.doc(parser=login_parser)
def post(self, session=None):
data = request.json
if data:
user = session.query(User)\
.filter(User.name == data.get('username').lower(), User.password == data.get('password'))\
.first()
if user:
args = login_parser.parse_args()
login_user(user, remember=args['remember'])
return {'status': 'success'}
return {'status': 'failed', 'message': 'Invalid username or password'}, 400
logout_api = api.namespace('logout', description='API Authentication')
@logout_api.route('/')
@api.doc(description='Logout and clear session cookies')
class LogoutAPI(APIResource):
@api.response(200, 'Logout successful')
def get(self, session=None):
flask_session.clear()
resp = jsonify({'status': 'success'})
resp.set_cookie('flexgetToken', '', expires=0)
return resp
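# Illustrative client-side sketch (not part of this module; the host, port and
# credentials below are assumptions):
#
#     import requests
#     requests.post('http://localhost:5050/api/login/',
#                   json={'username': 'flexget', 'password': 'flexget'})
#     requests.get('http://localhost:5050/api/tasks/',
#                  headers={'Authorization': 'Token <your-api-token>'})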
InakiZabala/odoomrp-wip | product_packaging_through_attributes/__openerp__.py | Python | agpl-3.0 | 1,619 | 0
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Product Packaging through Attributes",
"version": "1.0",
"depends": [
"base",
"product",
"product_packaging_views",
"product_attribute_types",
    ],
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "http://www.odoomrp.com",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
"Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
"Ana Juaristi <ajuaristio@gmail.com>"
],
"category": "Custom Module",
"summary": "",
"data": [
"views/product_view.xml",
"views/res_partner_view.xml",
],
"installable": True,
"auto_install": False,
}
jacekdalkowski/bike-timer | web-database/db_migrations/migrate.py | Python | apache-2.0 | 2,224 | 0.022932
# migrate.py [up|seed|down] [local|docker]
# e.g. migrate.py up docker
import sys
import os
import re
from subprocess import call, check_output
from operator import itemgetter, attrgetter, methodcaller
CASSANDRA_PATH_LOCAL = '/Users/jacekdalkowski/Dev/_cassandra/apache-cassandra-3.0.0/bin/'
ARTIFACTS_PATH_LOCAL = '/Users/jacekdalkowski/Dev/bike_timer/web-database/db_migrations'
CASSANDRA_PATH_DOCKER = ''
ARTIFACTS_PATH_DOCKER = '/root/db_migrations'
def filename_prefix_to_int(file_name):
p = re.compile("(\d+).*")
m = p.search(file_name)
if m:
return int(m.group(1))
else:
return None
def db_operation(current_dir, file_sufix, env, reverse):
cassandra_path = None;
artifacts_path = None;
if env == 'local':
        cassandra_path = CASSANDRA_PATH_LOCAL
artifacts_path = ARTIFACTS_PATH_LOCAL
elif env == 'docker':
cassandra_path = CASSANDRA_PATH_DOCKER
artifacts_path = ARTIFACTS_PATH_DOCKER
files = []
for file in os.listdir(current_dir):
if file.endswith(file_sufix):
files += [file]
prefix_and_files = map(lambda f: { 'id': filename_prefix_to_int(f), 'file': f}, files)
    #int_prefix_and_files = filter(lambda pf: pf['id'], prefix_and_files)
sorted_int_prefix_and_files = sorted(prefix_and_files, key=lambda d: d['id'], reverse=reverse)
print sorted_int_prefix_and_files
for file in sorted_int_prefix_and_files:
cqlsh_path = cassandra_path + 'cqlsh'
source_arg = 'SOURCE \'' + artifacts_path + '/' + file['file'] + '\''
call_args = [cqlsh_path, '-e', source_arg]
print call_args
call_output = check_output(call_args)
print call_output
current_dir = os.path.dirname(os.path.abspath(__file__))
if len(sys.argv) < 3:
print 'A parameter is required: up, down or seed.'
print 'A parameter is required: local or docker.'
quit()
env = None
if sys.argv[2] == 'local':
env = 'local'
elif sys.argv[2] == 'docker':
env = 'docker'
else:
print 'Available envs are: local and docker.'
if sys.argv[1] == 'up':
db_operation(current_dir, "_up.cql", env, False)
elif sys.argv[1] == 'down':
db_operation(current_dir, "_down.cql", env, True)
elif sys.argv[1] == 'seed':
db_operation(current_dir, "_seed.cql", env, False)
else:
print 'Available commands are: up, down and seed.'
mitsuhiko/sentry | src/sentry/api/endpoints/group_environment_details.py | Python | bsd-3-clause | 2,927 | 0.000342
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.base import StatsMixin
from sentry.api.bases.group import GroupEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.environment import (
GroupEnvironmentWithStatsSerializer
)
from sentry.api.serializers.models.grouprelease import (
GroupReleaseWithStatsSerializer
)
from sentry.models import Environment, GroupRelease, ReleaseEnvironment
class GroupEnvironmentDetailsEndpoint(GroupEndpoint, StatsMixin):
def get(self, request, group, environment):
try:
environment = Environment.objects.get(
project_id=group.project_id,
# XXX(dcramer): we have no great way to pass the empty env
name='' if environment == 'none' else environment,
)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
first_release = GroupRelease.objects.filter(
group_id=group.id,
environment=environment.name,
).order_by('first_seen').first()
last_release = GroupRelease.objects.filter(
group_id=group.id,
environment=environment.name,
).order_by('-first_seen').first()
# the current release is the 'latest seen' release within the
# environment even if it hasnt affected this issue
current_release = GroupRelease.objects.filter(
group_id=group.id,
environment=environment.name,
release_id=ReleaseEnvironment.objects.filter(
project_id=group.project_id,
environment_id=environment.id,
).order_by('-first_seen').values_list('release_id', flat=True).first(),
).first()
last_seen = GroupRelease.objects.filter(
group_id=group.id,
environment=environment.name,
).order_by('-last_seen').values_list('last_seen', flat=True).first()
stats_args = self._parse_args(request)
context = {
'environment': serialize(
environment, request.user, GroupEnvironmentWithStatsSerializer(
group=group,
since=stats_args['start'],
until=stats_args['end'],
)
),
'firstRelease': serialize(first_release, request.user),
'lastRelease': serialize(last_release, request.user),
'currentRelease': serialize(
current_release, request.user, GroupReleaseWithStatsSerializer(
since=stats_args['start'],
until=stats_args['end'],
)
),
'lastSeen': last_seen,
'firstSeen': first_release.first_seen if first_release else None,
}
        return Response(context)
andreymal/mini_fiction | mini_fiction/dumpload.py | Python | gpl-3.0 | 18,621 | 0.001913
import os
import sys
from datetime import datetime
from io import BytesIO
from pathlib import Path
from typing import List
from zipfile import ZIP_DEFLATED, ZipFile, ZipInfo
from mini_fiction import ponydump
from mini_fiction.logic.image import SavedImage
# Everything would still work without these excludes, but dropping redundant
# relation information greatly reduces the dump size
# TODO: wrap into dataclass
dumpdb_params = {
'story': {'exclude': (
'edit_log', 'story_views_set', 'votes', 'favorites', 'bookmarks',
'comments', 'activity', 'local', 'in_series_permissions',
'contributors', 'chapters', 'tags', 'tags_log',
)},
'author': {'exclude': (
'activity', 'edit_log',
'favorites', 'bookmarks', 'contributing', 'coauthorseries',
'news', 'votes', 'views',
'news_comments', 'news_comment_edits', 'news_comment_votes',
'news_last_edited_comments', 'news_deleted_comments',
'story_comments', 'story_comment_edits', 'story_comment_votes',
'story_last_edited_comments', 'story_deleted_comments',
'story_local_comments', 'story_local_comment_edits',
'story_local_last_edited_comments', 'story_local_deleted_comments',
'contacts', 'subscriptions', 'notifications',
'created_notifications', 'published_stories', 'approvals',
'registration_profiles',
'change_email_profiles', 'password_reset_profiles',
'abuse_reports', 'admin_log', 'tags_created', 'tags_aliases',
'tags_blacklist', 'tags_log',
)},
'chapter': {'exclude': (
'edit_log', 'chapter_views_set',
)},
'storycomment': {'exclude': (
'answers', 'edits', 'votes',
)},
'storylocalthread': {'exclude': (
'comments',
)},
'storylocalcomment': {'exclude': (
'answers', 'edits',
)},
'newsitem': {'exclude': (
'comments',
)},
'newscomment': {'exclude': (
'answers', 'edits', 'votes',
)},
'character': {'exclude': (
'stories',
)},
'tagcategory': {'exclude': (
'tags',
)},
'tag': {'exclude': (
'aliases', 'stories', 'log',
)},
'rating': {'exclude': (
'stories',
)},
'series': {'exclude': (
'permissions',
)},
'adminlogtype': {'exclude': (
'log',
)},
}
# For safety and as a foolproofing measure, ALL model fields for the zip dump
# must be listed in include, exclude or override (but not media)
zip_dump_params = {
'logopic': {
'include': (
'id', 'image_bundle', 'visible', 'description',
'original_link', 'original_link_label', 'created_at', 'updated_at',
),
'datekey': 'updated_at',
'media': ('image_bundle',),
},
'charactergroup': {
'include': ('id', 'name', 'description'),
'exclude': ('characters',),
},
'character': {
'include': ('id', 'name', 'description', 'image_bundle', 'group'),
'exclude': (
'stories',
),
'media': ('image_bundle',),
},
'tagcategory': {
'include': ('id', 'name', 'description', 'created_at', 'updated_at'),
'exclude': ('tags',),
},
'tag': {
'include': (
'id', 'name', 'iname', 'category', 'description',
'is_spoiler', 'created_at', 'updated_at', 'is_alias_for',
'is_hidden_alias', 'is_extreme_tag', 'reason_to_blacklist',
),
'exclude': (
'created_by', 'stories_count', 'published_stories_count',
'aliases', 'stories', 'log',
),
},
'rating': {
'include': ('id', 'name', 'description', 'nsfw'),
'exclude': ('stories',),
},
'staticpage': {
'include': ('name', 'lang', 'title', 'content', 'is_template', 'is_full_page', 'date', 'updated'),
'datekey': 'updated',
},
'htmlblock': {
'include': ('name', 'lang', 'title', 'content', 'is_template', 'cache_time', 'date', 'updated'),
'datekey': 'updated',
},
'adminlogtype': {
'include': ('id', 'model'),
'exclude': ('log',),
},
'author': {
        # Only the single system user is dumped
'include': (
'bio', 'date_joined', 'first_name', 'image_bundle',
'id', 'is_active', 'is_staff', 'is_superuser', 'last_name', 'last_visit', 'username',
'activated_at', 'last_login', 'text_source_behaviour',
),
'exclude': (
'comment_spoiler_threshold', 'comments_maxdepth', 'detail_view', 'excluded_categories',
            'last_viewed_notification_id', 'nsfw', 'premoderation_mode', 'last_password_change',
'silent_email', 'silent_tracker', 'comments_per_page', 'header_mode', 'extra',
'ban_reason', 'published_stories_count', 'all_story_comments_count', 'timezone',
'session_token',
),
'override': {'email': '', 'password': ''},
'with_collections': False,
'media': ('image_bundle',),
},
}
class MiniFictionDump(ponydump.PonyDump):
def __init__(self, database=None, dict_params=None, chunk_sizes=None, default_chunk_size=250):
if database is None:
from mini_fiction.database import db
database = db
ready_dict_params = dict(dumpdb_params)
if dict_params:
ready_dict_params.update(dict(dict_params))
full_chunk_sizes = {
'chapter': 25,
'story': 100,
}
if chunk_sizes:
full_chunk_sizes.update(chunk_sizes)
super().__init__(
database,
dict_params=ready_dict_params,
chunk_sizes=full_chunk_sizes,
default_chunk_size=default_chunk_size,
)
        # PonyDump._build_depmap() does not define an ordering of models by
        # optional dependencies, but these small reorderings significantly
        # improve performance by reducing depcache usage
self.put_depmap_entity_after('series', after_entity='story')
self.put_depmap_entity_after('coauthorsseries', after_entity='series')
self.put_depmap_entity_after('inseriespermissions', after_entity='coauthorsseries')
        self.put_depmap_entity_after('activity', after_entity=None)  # After all the others
self.put_depmap_entity_after('storyview', after_entity=None)
def dumpdb_console(dirpath, entities_list=None, gzip_compression=0, verbosity=2):
from mini_fiction.utils.misc import progress_drawer
mfd = MiniFictionDump()
ljust_cnt = max(len(x) for x in mfd.entities) + 2
drawer = None
current = None
for status in mfd.dump_to_directory(dirpath, entities_list, gzip_compression=gzip_compression):
if not status['entity']:
            # The whole dump has finished
if verbosity:
print()
continue
if current != status['entity']:
current = status['entity']
if verbosity == 1:
print(current, end='... ')
sys.stdout.flush()
if verbosity >= 2 and not drawer:
print(current.ljust(ljust_cnt), end='')
drawer = progress_drawer(status['count'], show_count=True)
drawer.send(None)
drawer.send(status['current'])
if not status['pk']:
            # One model has finished
if verbosity >= 2:
try:
drawer.send(None)
except StopIteration:
pass
drawer = None
if verbosity:
print()
elif verbosity:
print('ok. {}'.format(status['count']))
continue
if verbosity >= 2:
drawer.send(status['current'])
def loaddb_console(paths, verbosity=2, only_create=False):
from mini_fiction.utils.misc import progress_drawer
mfd = MiniFictionDump()
filelist = mfd.walk_all_paths(paths)
if not filelist:
raise OSError('Cannot find dump files')
ljust_cnt = max(len(os.path.split(x[1])[-1]) for x in filelist) + 2
created
thomasvdv/flightbit | forecast/keys_iterator.py | Python | gpl-2.0 | 1,140 | 0.001754
import traceback
import sys
from gribapi import *
INPUT = 'rap_130_20120822_2200_001.grb2'
VERBOSE = 1 # verbose error reporting
def example():
f = open(INPUT)
while 1:
gid = grib_new_from_file(f)
if gid is None: break
iterid = grib_keys_iterator_new(gid, 'ls')
# Different types of keys can be skipped
# grib_skip_computed(iterid)
# grib_skip_coded(iterid)
# grib_skip_edition_specific(iterid)
# grib_skip_duplicates(iterid)
# grib_skip_read_only(iterid)
# grib_skip_function(iterid)
while grib_keys_iterator_next(iterid):
            keyname = grib_keys_iterator_get_name(iterid)
keyval = grib_get_string(iterid, keyname)
print "%s = %s" % (keyname, keyval)
grib_keys_iterator_delete(iterid)
grib_release(gid)
f.close()
def main():
try:
example()
    except GribInternalError, err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
print >> sys.stderr, err.msg
return 1
if __name__ == "__main__":
sys.exit(main())
janusnic/shoop | shoop/front/utils/product_sorting.py | Python | agpl-3.0 | 1,352 | 0
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
PRODUCT_SORT_CHOICES = [
("name_a", _(u"Name - A-Z")),
("name_d", _(u"Name - Z-A")),
("price_a", _(u"Price - Low to High")),
("price_d", _(u"Price - High to Low")),
]
def sort_products(request, products, sort):
if not sort:
sort = ""
# Force sorting despite what collation says
sorter = _get_product_name_lowered_stripped
key = (sort[:-2] if sort.endswith(('_a', '_d')) else sort)
reverse = bool(sort.endswith('_d'))
if key == "name":
sorter = _get_product_name_lowered
elif key == "price":
sorter = _get_product_price_getter_for_request(request)
if sorter:
products = sorted(products, key=sorter, reverse=reverse)
return products
def _get_product_name_lowered_stripped(product):
    return product.name.lower().strip()
def _get_product_name_lowered(product):
return product.name.lower()
def _get_product_price_getter_for_request(request):
def _get_product_price(product):
return product.get_price(request)
return _get_product_price
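# Usage sketch (illustrative): `sort` is expected to be one of the keys from
# PRODUCT_SORT_CHOICES above, typically taken straight from the request, e.g.
#
#     products = sort_products(request, products, request.GET.get("sort"))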
ejona86/grpc | src/python/grpcio_tests/tests_aio/unit/_constants.py | Python | apache-2.0 | 816 | 0
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If we use an unreachable IP, depending on the network stack, we might not get
# with an RST fast enough. This used to cause tests to flake under different
# platforms.
UNREACHABLE_TARGET = 'foo/bar'
UNARY_CALL_WITH_SLEEP_VALUE = 0.2
jasonrbriggs/stomp.py | tests/test_override_threading.py | Python | apache-2.0 | 1,654 | 0
import logging
from concurrent.futures import ThreadPoolExecutor
import stomp
from stomp.listener import TestListener
from .testutils import *
executor = ThreadPoolExecutor()
def create_thread(fc):
f = executor.submit(fc)
    print("Created future %s on executor %s" % (f, executor))
return f
class ReconnectListener(TestListener):
    def __init__(self, conn):
TestListener.__init__(self, "123", True)
self.conn = conn
def on_receiver_loop_ended(self, *args):
if self.conn:
c = self.conn
self.conn = None
c.connect(get_default_user(), get_default_password(), wait=True)
c.disconnect()
@pytest.fixture
def conn():
conn = stomp.Connection(get_default_host())
# check thread override here
conn.transport.override_threading(create_thread)
listener = ReconnectListener(conn)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
class TestThreadingOverride(object):
def test_threading(self, conn):
listener = conn.get_listener("testlistener")
queuename = "/queue/test1-%s" % listener.timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
conn.send(body="this is a test", destination=queuename, receipt="123")
validate_send(conn, 1, 1, 0)
logging.info("first disconnect")
conn.disconnect(receipt="112233")
logging.info("reconnecting")
conn.connect(get_default_user(), get_default_password(), wait=True)
logging.info("second disconnect")
conn.disconnect()
MrAlexDeluxe/Zeeguu-Web | zeeguu_web/app.py | Python | mit | 1,825 | 0.006027
# -*- coding: utf8 -*-
import os
import os.path
import flask
import flask_assets
import flask_sqlalchemy
from .cross_domain_app import CrossDomainApp
from zeeguu.util.configuration import load_configuration_or_abort
import sys
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")
# *** Starting the App *** #
app = CrossDomainApp(__name__)
load_configuration_or_abort(app, 'ZEEGUU_WEB_CONFIG',
['HOST', 'PORT', 'DEBUG', 'SECRET_KEY', 'MAX_SESSION',
'SMTP_SERVER', 'SMTP_USERNAME', 'SMTP_PASSWORD',
'INVITATION_CODES'])
# The zeeguu.model module relies on an app being injected from outside
# ----------------------------------------------------------------------
import zeeguu
zeeguu.app = app
import zeeguu.model
assert zeeguu.model
# -----------------
from .account import account
app.register_blueprint(account)
from .exercises import exercises
app.register_blueprint(exercises)
from zeeguu_exercises import ex_blueprint
app.register_blueprint(ex_blueprint, url_prefix="/practice")
from umr import umrblue
app.register_blueprint(umrblue, url_prefix="/read")
env = flask_assets.Environment(app)
env.cache = app.instance_path
env.directory = os.path.join(app.instance_path, "gen")
env.url = "/gen"
env.append_path(os.path.join(
os.path.dirname(os.path.abspath(__file__)), "static"
), "/static")
# create the instance folder and return the path
def instance_path(app):
path = os.path.join(app.instance_path, "gen")
try:
os.makedirs(path)
except Exception as e:
print(("exception" + str(e)))
if not os.path.isdir(path):
raise
return path
instance = flask.Blueprint("instance", __name__, static_folder=instance_path(app))
app.register_blueprint(instance)
odoo-colombia/l10n-colombia | account_tax_group_type/__manifest__.py | Python | agpl-3.0 | 636 | 0.001577
# -*- coding: utf-8 -*-
# Copyright 2019 Joan Marín <Github@JoanMarin>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Tax Group Types",
"category": "Financial",
"version": "10.0.1.0.0",
"author": "EXA Auto Parts Github@exaap, "
"Joan Marín Github@JoanMarin",
"website": "https://github.com/odooloco/l10n-colombia",
"license": "AGPL-3",
"summary": "Types for Tax Groups",
"depends": [
"account_tax_group_menu",
],
"data": [
        'security/ir.model.access.csv',
        "views/account_tax_group_views.xml",
],
"installable": True,
}
openstack/neutron-lib | neutron_lib/api/definitions/network_segment_range.py | Python | apache-2.0 | 5,876 | 0
# Copyright (c) 2018 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib import constants
from neutron_lib.db import constants as db_const
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
# The name of the extension.
NAME = 'Neutron Network Segment Range'
# The alias of the extension.
ALIAS = 'network-segment-range'
# The description of the extension.
DESCRIPTION = "Provides support for the network segment range management"
# A timestamp of when the extension was introduced.
UPDATED_TIMESTAMP = "2018-11-29T00:00:00-00:00"
# The name of the resource.
RESOURCE_NAME = 'network_segment_range'
# The plural for the resource.
COLLECTION_NAME = 'network_segment_ranges'
# Min ID for VLAN, VXLAN, GRE and GENEVE all equal to 1; Max ID for them are
# 4094, 2 ** 24 - 1, 2 ** 32 - 1 and 2 ** 24 - 1 respectively.
# Take the largest range: [MIN_GRE_ID, MAX_GRE_ID] as the limit for validation.
NETWORK_SEGMENT_RANGE_LIMIT = [constants.MIN_GRE_ID, constants.MAX_GRE_ID]
RESOURCE_ATTRIBUTE_MAP = {
COLLECTION_NAME: {
'id': {'allow_post': False,
'allow_put': False,
'validate': {'type:uuid': None},
'is_filter': True,
'is_sort_key': True,
'primary_key': True,
'is_visible': True},
'name': {'allow_post': True,
'allow_put': True,
'validate': {'type:string': db_const.NAME_FIELD_SIZE},
'default': '',
'is_filter': True,
'is_sort_key': True,
'is_visible': True},
'default': {'allow_post': False,
'allow_put': False,
'convert_to': converters.convert_to_boolean,
'default': False,
'is_visible': True},
constants.SHARED: {'allow_post': True,
'allow_put': False,
'convert_to': converters.convert_to_boolean,
'default': True,
'is_visible': True},
'project_id': {'allow_post': True,
'allow_put': False,
'validate': {
'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'required_by_policy': True,
'is_filter': True,
'is_sort_key': True,
'is_visible': True},
'network_type': {'allow_post': True,
'allow_put': False,
'validate': {
'type:values':
constants.NETWORK_SEGMENT_RANGE_TYPES},
'default': constants.ATTR_NOT_SPECIFIED,
'is_filter': True,
'is_visible': True},
'physical_network': {'allow_post': True,
'allow_put': False,
'validate': {
'type:string':
providernet.PHYSICAL_NETWORK_MAX_LEN},
'default': constants.ATTR_NOT_SPECIFIED,
'is_filter': True,
'is_visible': True},
'minimum': {'allow_post': True,
'allow_put': True,
'convert_to': converters.convert_to_int,
'validate': {'type:range': NETWORK_SEGMENT_RANGE_LIMIT},
'is_visible': True},
'maximum': {'allow_post': True,
'allow_put': True,
                    'convert_to': converters.convert_to_int,
'validate': {'type:range': NETWORK_SEGMENT_RANGE_LIMIT},
'is_visible': True},
'used': {'allow_post': False,
'allow_put': False,
'is_visible': True},
'available': {'allow_post': False,
'allow_put': False,
'convert_to': converters.convert_none_to_empty_list,
'is_visible': True}
}
}
# Whether or not this extension is simply signaling behavior to the user
# or it actively modifies the attribute map.
IS_SHIM_EXTENSION = False
# Whether the extension is marking the adoption of standardattr model for
# legacy resources, or introducing new standardattr attributes. False or
# None if the standardattr model is adopted since the introduction of
# resource extension.
# If this is True, the alias for the extension should be prefixed with
# 'standard-attr-'.
IS_STANDARD_ATTR_EXTENSION = False
# The subresource attribute map for the extension. It adds child resources
# to main extension's resource. The subresource map must have a parent and
# a parameters entry. If an extension does not need such a map, None can
# be specified (mandatory).
SUB_RESOURCE_ATTRIBUTE_MAP = {}
# The action map: it associates verbs with methods to be performed on
# the API resource.
ACTION_MAP = {}
# The list of required extensions.
REQUIRED_EXTENSIONS = [providernet.ALIAS]
# The list of optional extensions.
OPTIONAL_EXTENSIONS = []
# The action status.
ACTION_STATUS = {}
PARINetwork/pari | core/utils.py | Python | bsd-3-clause | 5,677 | 0.002818
from __future__ import print_function
import datetime
from collections import OrderedDict
from django.urls import reverse
from django.http import JsonResponse
from django.utils.translation import get_language, activate
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.core.rich_text import RichText
def get_translations_for_page(page):
translations = []
activate(get_language())
try:
trans_holder = page.get_children().get(title="Translations")
if page.live:
translations.append(page.specific)
        translations.extend(trans_holder.get_children().live().specific())
    except Page.DoesNotExist:
# Check if page exists within the translation folder
parent = page.get_parent()
if parent.title == "Translations":
if parent.get_parent().live:
translations.append(parent.get_parent().specific)
live_children = parent.get_children().live()
if live_children:
translations.extend(live_children.specific())
return translations
def get_translated_or_default_page(default_page, translations):
translated_page = default_page
for translation in translations:
if translation.language == get_language():
translated_page = translation
return translated_page
def filter_by_language(request, *items_to_filter):
lang = get_language()
filtered_list = []
if request.GET.get("lang"):
lang = request.GET["lang"]
if not lang == 'all':
for item in items_to_filter:
filtered_list.append(item.filter(language=lang))
return tuple(items_to_filter) if len(filtered_list) == 0 else tuple(filtered_list)
def get_translations_for_articles(articles):
article_translations = {}
for article in articles:
article_translations[article] = get_translations_for_page(article)
return article_translations
def get_unique_photographers(album):
photographers = []
for slide in album.slides.all():
photographers.extend(slide.image.photographers.all())
return set(photographers)
def get_slide_detail(album):
response_data = {}
response_data['slides'] = []
photographers = []
slide_photo_graphers = []
for slide in album.slides.all():
slide_photo_graphers.extend(map(lambda photographer_name: photographer_name.name,
slide.image.photographers.all()))
photographers_of_album = list(set(slide_photo_graphers))
for index, slide in enumerate(album.slides.all(), start=0):
slide_dict = dict([('type', 'image'), ('show_title', "True"), ('album_title', album.title)])
slide_dict['src'] = slide.image.file.url
slide_dict['src_resized'] = slide.image.get_rendition('height-876').url
block = blocks.RichTextBlock()
description_value = RichText(slide.description)
slide_dict['description'] = block.render(description_value)
slide_dict['album_description'] = album.description
slide_dict['url'] = album.get_absolute_url()
slide_dict['slide_photographer'] = list(map(lambda photographer_name: photographer_name.name,
slide.image.photographers.all()))
if index == 0:
slide_dict['slide_photographer'] = photographers_of_album
photographers.extend(set(slide.image.photographers.all()))
if album.first_published_at:
published_date = datetime.datetime.strptime(str(album.first_published_at)[:10], "%Y-%m-%d")
else:
published_date = datetime.datetime.now()
date = published_date.strftime('%d %b,%Y')
slide_dict['image_captured_date'] = date
image_location = slide.image.locations.first()
slide_dict['slide_location'] = "%s, %s" % (
image_location.district, image_location.state) if image_location else ''
slide_dict['track_id'] = slide.audio
response_data['slides'].append(slide_dict)
response_data['authors'] = []
for photographer in set(photographers):
photographer_dict = dict(
[('type', 'inline'), ('show_title', "False"), ('name', photographer.name), ('bio', photographer.bio_en),
('twitter_username', photographer.twitter_handle), ('facebook_username', photographer.facebook_username),
('email', photographer.email), ('website', photographer.website),
('author_url', reverse('author-detail', kwargs={'slug': photographer.slug}))])
response_data['authors'].append(photographer_dict)
return JsonResponse(response_data)
class SearchBoost(object):
TITLE = 6
AUTHOR = 5
LOCATION = 4
DESCRIPTION = 3
CONTENT = 2
def construct_guidelines(guideline_content):
guideline_dict = OrderedDict()
for content in guideline_content:
if content.block_type == "heading_title":
current_heading = content.value
guideline_dict[current_heading] = {"sub_section": []}
if content.block_type == "heading_content":
guideline_dict[current_heading]["heading_content"] = content.value
if content.block_type == "sub_section_with_heading":
guideline_dict[current_heading]["has_sub_section_with_heading"] = True
guideline_dict[current_heading]["sub_section"].append(content.value)
if content.block_type == "sub_section_without_heading":
guideline_dict[current_heading]["has_sub_section_with_heading"] = False
guideline_dict[current_heading]["sub_section"].append({"content": content.value})
return guideline_dict
jnayak1/osf-meetings | meetings/meetings/wsgi.py | Python | apache-2.0 | 393 | 0
"""
WSGI config for meetings project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meetings.settings")
application = get_wsgi_application()
albertz/music-player | mac/pyobjc-framework-Quartz/PyObjCTest/test_ciplugininterface.py | Python | bsd-2-clause | 488 | 0.006148
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
from Quartz import *
class TestCIPluginInterfaceHelper (NSObject):
def load_(self, h): return 1
class TestCIPlugInInterface (TestCase):
def testMethods(self):
self.assertResultIsBOOL(TestCIPluginInterfaceHelper.load_)
def no_testProtocol(self):
p = objc.protocolNamed('CIPlugInRegistration')
        self.assertIsInstance(p, objc.formal_protocol)
if __name__ == "__main__":
main()
brotchie/keepnote | keepnote/notebook/connection/__init__.py | Python | gpl-2.0 | 11,118 | 0.004767
"""
KeepNote
Low-level Create-Read-Update-Delete (CRUD) interface for notebooks.
"""
#
# KeepNote
# Copyright (c) 2008-2011 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
import urlparse
#=============================================================================
# errors
class ConnectionError (StandardError):
def __init__(self, msg="", error=None):
StandardError.__init__(self, msg)
self.error = error
def repr(self):
if self.error is not None:
return StandardError.repr(self) + ": " + repr(self.error)
else:
return StandardError.repr(self)
class UnknownNode (ConnectionError):
def __init__(self, msg="unknown node"):
ConnectionError.__init__(self, msg)
class NodeExists (ConnectionError):
def __init__(self, msg="node exists"):
        ConnectionError.__init__(self, msg)
class FileError (ConnectionError):
def __init__(self, msg="file error", error=None):
ConnectionError.__init__(self, msg, error)
class UnknownFile (FileError):
    def __init__(self, msg="unknown file"):
FileError.__init__(self, msg)
class CorruptIndex (ConnectionError):
def __init__(self, msg="index error", error=None):
ConnectionError.__init__(self, msg, error)
#=============================================================================
# file path functions
def path_join(*parts):
"""
Join path parts for node file paths
Node files always use '/' for path separator.
"""
# skip empty strings
# trim training "slashes"
return "/".join((part[:-1] if part[-1] == "/" else part)
for part in parts if part != "")
def path_basename(filename):
"""
Return the last component of a filename
aaa/bbb => bbb
aaa/bbb/ => bbb
aaa/ => aaa
aaa => aaa
'' => ''
/ => ''
"""
if filename.endswith("/"):
i = filename.rfind("/", 0, -1) + 1
return filename[i:-1]
else:
i = filename.rfind("/", 0, -1) + 1
return filename[i:]
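# Quick illustrative checks (not in the original module) of the helpers above:
#
#     path_join("aaa/", "bbb")        == "aaa/bbb"
#     path_join("", "bbb/", "ccc")    == "bbb/ccc"
#     path_basename("aaa/bbb/")       == "bbb"
#     path_basename("/")              == ""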
#=============================================================================
class NoteBookConnection (object):
def __init__(self):
pass
#======================
# connection API
def connect(self, url):
"""Make a new connection"""
pass
def close(self):
"""Close connection"""
pass
def save(self):
"""Save any unsynced state"""
pass
#======================
# Node I/O API
def create_node(self, nodeid, attr):
"""Create a node"""
# TODO: document root creation
# proposal 1: if rootid is not set yet, then this node is root
# proposal 2: if parentids is [], then this node is root
# proposal 3: try to remove root concept from connection
raise NotImplementedError("create_node")
def read_node(self, nodeid):
"""Read a node attr"""
raise NotImplementedError("read_node")
def update_node(self, nodeid, attr):
"""Write node attr"""
raise NotImplementedError("update_node")
def delete_node(self, nodeid):
"""Delete node"""
raise NotImplementedError("delete_node")
def has_node(self, nodeid):
"""Returns True if node exists"""
raise NotImplementedError("has_node")
# TODO: can this be simplified with a search query?
def get_rootid(self):
"""Returns nodeid of notebook root node"""
raise NotImplementedError("get_rootid")
#===============
# file API
def open_file(self, nodeid, filename, mode="r", codec=None):
"""
Open a file contained within a node
nodeid -- node to open a file from
filename -- filename of file to open
mode -- can be "r" (read), "w" (write), "a" (append)
codec -- read or write with an encoding (default: None)
"""
raise NotImplementedError("open_file")
def delete_file(self, nodeid, filename):
"""Delete a file contained within a node"""
raise NotImplementedError("delete_file")
def create_dir(self, nodeid, filename):
"""Create directory within node"""
raise NotImplementedError("create_dir")
def list_dir(self, nodeid, filename="/"):
"""
List data files in node
"""
raise NotImplementedError("list_dir")
def has_file(self, nodeid, filename):
raise NotImplementedError("has_file")
def move_file(self, nodeid, filename1, nodeid2, filename2):
"""
Move or rename a node file
'nodeid1' and 'nodeid2' cannot be None.
"""
if nodeid is None or nodeid2 is None:
raise UnknownFile("nodeid cannot be None")
        self.copy_file(nodeid, filename1, nodeid2, filename2)
        self.delete_file(nodeid, filename1)
def copy_file(self, nodeid1, filename1, nodeid2, filename2):
"""
Copy a file between two nodes
'nodeid1' and 'nodeid2' can be the same nodeid.
If 'nodeidX' is None, 'filenameX' is assumed to be a local file.
If 'filename1' is a dir (ends with a "/") filename2 must be a dir
and the copy is recursive for the contents of 'filename1'.
"""
if filename1.endswith("/"):
# copy directory tree
self.create_dir(nodeid2, filename2)
            for filename in self.list_dir(nodeid1, filename1):
self.copy_file(nodeid1, path_join(filename1, filename),
nodeid2, path_join(filename2, filename))
else:
# copy file
if nodeid1 is not None:
stream1 = self.open_file(nodeid1, filename1)
else:
# filename1 is local
stream1 = open(filename1, "rb")
if nodeid2 is not None:
stream2 = self.open_file(nodeid2, filename2, "w")
else:
# filename 2 is local
stream2 = open(filename2, "w")
while True:
data = stream1.read(1024*4)
if len(data) == 0:
break
stream2.write(data)
stream1.close()
stream2.close()
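    # Hypothetical copy_file usage (sketch only; 'conn', 'nodeid' and 'nodeid2'
    # are assumed names, not part of this module):
    #   conn.copy_file(nodeid, "page.html", None, "/tmp/page.html")      # export to a local file
    #   conn.copy_file(nodeid, "attachments/", nodeid2, "attachments/")  # recursive dir copy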
#---------------------------------
# indexing
def index(self, query):
# TODO: make this plugable
# built-in queries
# ["index_attr", key, (index_value)]
# ["search", "title", text]
# ["search_fulltext", text]
# ["has_fulltext"]
# ["node_path", nodeid]
# ["get_attr", nodeid, key]
if query[0] == "index_attr":
index_value = query[3] if len(query) == 4 else False
return self.index_attr(query[1], query[2], index_value)
elif query[0] == "search":
assert query[1] == "title"
return self.search_node_titles(query[2])
elif query[0] == "search_fulltext":
return self.search_node_contents(query[1])
elif query[0] == "has_fulltext":
return False
elif query[0] == "node_path":
return self.get_node_path_by_id(query[1])
elif query[0] == "get_attr":
return self.get_attr_by_id(query[1], query[2])
# FS-specific
elif query[0] == "init":
return self.init_index()
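    # Example dispatch through index() (illustrative only; assumes a concrete
    # connection instance 'conn' and an existing 'nodeid'):
    #   conn.index(["search", "title", "shopping"])        # title search
    #   conn.index(["get_attr", nodeid, "modified_time"])  # single attr lookup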
Yelp/mycroft | mycroft/tests/backend/test_etl_helper.py | Python | mit | 2,789 | 0.000359
# -*- coding: utf-8 -*-
import pytest
from tests.models.test_etl_record import etl_records # noqa
from tests.models.test_abstract_records import dynamodb_connection # noqa
from mycroft.backend.worker.etl_status_helper import ETLStatusHelper
import mock
RECORDS = [
{'status': 'error', 'date': '2014-09-01', 'start_time': 4, 'end_time': 10,
'error_info': {'crash_a': 'error_a', 'crash_b': 'error_b'}},
{'status': 'success', 'date': '2014-09-02', 'start_time': 6, 'end_time': 12,
'error_info': {}},
]
MSG = {
'uuid': 'some-uuid',
'redshift_id': 'some-rs-id',
}
KWARGS = {
'hash_key': None,
'etl_status': None,
'et_starttime': None,
'load_starttime': None,
'data_date': None,
'run_by': None,
'redshift_id': None,
'job_id': None,
'etl_error': None,
}
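# Note: the tests below call entry.get(**KWARGS) with every field set to None,
# which is assumed here to act as a full-field projection returning a dict of
# the stored attributes (an inference from how the results are asserted on).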
class TestETLStatusHelper(object):
@pytest.yield_fixture # noqa
def get_etl_helper(self, etl_records):
with mock.patch(
'mycroft.models.aws_connections.TableConnection.get_connection'
) as mocked_etl:
mocked_etl.return_value = etl_records
yield ETLStatusHelper()
def test_etl_step_started(self, get_etl_helper):
etl = get_etl_helper
for r in RECORDS:
date = r['date']
step = 'et'
# run twice to hit new and update record cases
etl.etl_step_started(MSG, date, step)
etl.etl_step_started(MSG, date, step)
entry = etl.etl_db.get(hash_key='some-uuid', data_date=date)
entry_dict = entry.get(**KWARGS)
assert entry_dict['hash_key'] == 'some-uuid'
assert entry_dict['data_date'] == date
with pytest.raises(ValueError):
etl.etl_step_started(MSG, None, 'et')
def test_etl_step_complete(self, get_etl_helper):
etl = get_etl_helper
for r in RECORDS:
date = r['date']
step = 'et'
            # test case: no previous record
            etl.etl_step_complete(MSG, date, step, r)
            # test case: existing record
            etl.etl_step_started(MSG, date, step)
etl.etl_step_complete(MSG, date, step, r)
entry = etl.etl_db.get(hash_key='some-uuid', data_date=date)
entry_dict = entry.get(**KWARGS)
assert entry_dict['hash_key'] == 'some-uuid'
assert entry_dict['data_date'] == date
if entry_dict['etl_status'] == 'load_success':
assert entry_dict.get('etl_error') is None
elif entry_dict['etl_status'] == 'load_error':
assert entry_dict['etl_error'] == str(r['error_info'])
with pytest.raises(ValueError):
etl.etl_step_complete(MSG, None, 'et', RECORDS[0])
jogo279/trobo | opponents/corey_abshire/tronmoves.py | Python | bsd-2-clause | 4,160 | 0.001442
# tronmoves: Moves library for a Google AI 2010 TronBot entry.
# Copyright (C) 2010 Corey Abshire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random, time, logging, games, utils, tron
from tronutils import *
from aimatron import *
#_____________________________________________________________________
# Basic Moves (from starter package)
#
def random_move(board):
"Just return any random legal move."
return random.choice(board.moves())
def most_free_move(board):
"Find the move that has the most immediate floor tiles."
bestcount = -1
bestmove = tron.NORTH
for dir in board.moves():
dest = board.rel(dir)
count = 0
for pos in board.adjacent(dest):
if board[pos] == tron.FLOOR:
count += 1
if count > bestcount:
bestcount = count
bestmove = dir
return bestmove
def follow_wall_move(board, order):
"Find a move that follows the wall, favoring the order given."
decision = board.moves()[0]
for dir in order:
dest = board.rel(dir)
if not board.passable(dest):
continue
adj = board.adjacent(dest)
if any(board[pos] == tron.WALL for pos in adj):
decision = dir
break
return decision
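# Illustrative call (a sketch; assumes the standard tron direction constants):
#   move = follow_wall_move(board, [tron.NORTH, tron.EAST, tron.SOUTH, tron.WEST])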
#_____________________________________________________________________
# Core Moves (called directly from my main bot)
#
def surrender_move(board):
"Return a constant move in order to surrender."
return tron.NORTH
def most_open_move(board, order):
"Find the move that has the most open floor filled around it."
p1, p2, t, touching = dfs_count_around(board)
wall_move = follow_wall_move(board, order)
open_move = utils.argmax(p1.keys(), lambda k: p1[k])
if p1[wall_move] == p1[open_move]:
best_move = wall_move
else:
best_move = open_move
logging.debug("most open move is: %s (%d) %s", best_move, p1[best_move], p1)
return best_move
def minimax_move(board, finish_by=None):
"Find a move based on an alpha-beta search of the game tree."
game = TronGame()
return alphabeta_search(board, game, finish_by)
def follow_path_move(board, path):
"Follow the given path."
return move_made(board.me(), path[1])
def same_dist_move(board, same_dist, order):
"Try to draw a line through the same distance tiles."
first_point = same_dist[0]
last_point = same_dist[-1]
if board.passable(first_point):
path = shortest_path(board, board.me(), first_point)
return move_made(board.me(), path[1])
elif board.passable(last_point):
path = shortest_path(board, board.me(), last_point)
return move_made(board.me(), path[1])
else:
return most_open_move(board, order)
#_____________________________________________________________________
# Experimental Moves (tried, but not in use)
#
def chunky_minimax_move(board, chunk_size, finish_by=None):
"Find a move based on an alpha-beta search of the game tree."
game = TronChunkyGame(chunk_size)
return alphabeta_search(board, game, finish_by)
def hotspot_move(board):
"Find the move that targets the next hot spot."
try:
me = board.me()
heat = heat_map(board)
hotspots = heat.keys()
hotspots.sort(key=lambda k: heat[k], reverse=True)
target = hotspots[0]
path = shortest_path(board, me, target)
next_step = path[1]
move = move_made(me, next_step)
return move
except KeyError:
        return minimax_move(board)
alaudet/hcsr04sensor | recipes/cylinder_volume_side_metric.py | Python | mit | 844 | 0
"""Calculate the liquid volume of a cylinder on its side"""
from hcsr04sensor import sensor
trig_pin = 17
echo_pin = 27
# default values
# temperature = 20 celsius
# unit = "metric"
# gpio_mode = GPIO.BCM
# Get litres in a cylinder
cyl_length_metric = 300 # centimeters
cyl_radius_metric = 48 # centimeters
cyl_depth = 96 # cm from sensor to bottom
value = sensor.Measurement(trig_pin, echo_pin)
# for imperial add temp and unit and change all cm values to inches
# value = sensor.Measurement(trig_pin, echo_pin, 68, 'imperial')
distance = value.raw_distance()
water_depth_metric = value.depth(distance, cyl_depth)
volume_litres = value.cylinder_volume_side(
water_depth_metric, cyl_length_metric, cyl_radius_metric
)
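# For reference, the liquid volume of a horizontal cylinder is the circular-segment
# area times the length (a sketch of the math this call is expected to perform,
# not the library source), with h the liquid depth and r the radius, both in cm:
#   segment_area = r**2 * acos((r - h) / r) - (r - h) * sqrt(2*r*h - h**2)
#   volume_cm3   = segment_area * length
#   litres       = volume_cm3 / 1000.0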
print(
"The liquid volume of the cylinder on its side {} litres".format(
round(volume_litres, 1)
)
)