repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
audaciouscode/PassiveDataKit-Django | migrations/0069_auto_20190915_1605.py | Python | apache-2.0 | 600 | 0.001667 | # pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-15 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0068_remove_deviceissue_pl | atform_version'),
]
operations = [
migrations.AlterField(
model_name='deviceissue',
name='state',
field=models.CharField(choices=[('opened', 'Open | ed'), ('in-progress', 'In Progress'), ('resolved', 'Resolved'), ('wont-fix', "Won't Fix")], default='opened', max_length=1024),
),
]
|
jeor0980/TeamChampion | hatServer/config.py | Python | mit | 60 | 0 | S | ECRET_KEY = 'you-w | ill-never-guess'
MONGO_DB = 'flask_test'
|
eeshangarg/zulip | zerver/data_import/slack.py | Python | apache-2.0 | 52,071 | 0.002016 | import logging
import os
import random
import secrets
import shutil
import subprocess
import zipfile
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Type, TypeVar
import orjson
import requests
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.timezone import now as timezone_now
from zerver.data_import.import_util import (
ZerverFieldsT,
build_attachment,
build_avatar,
build_defaultstream,
build_huddle,
build_message,
build_realm,
build_recipient,
build_stream,
build_subscription,
build_usermessages,
build_zerver_realm,
create_converted_data_files,
make_subscriber_map,
process_avatars,
process_emojis,
process_uploads,
)
from zerver.data_import.sequencer import NEXT_ID
from zerver.data_import.slack_message_conversion import (
convert_to_zulip_markdown,
get_user_full_name,
)
from zerver.lib.emoji import name_to_codepoint
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.lib.upload import resize_logo, sanitize_name
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
Reaction,
Realm,
RealmEmoji,
Recipient,
UserProfile,
)
SlackToZulipUserIDT = Dict[str, int]
AddedChannelsT = Dict[str, Tuple[str, int]]
AddedMPIMsT = Dict[str, Tuple[str, int]]
DMMembersT = Dict[str, Tuple[str, str]]
SlackToZulipRecipientT = Dict[str, int]
# Generic type for SlackBotEmail class
SlackBotEmailT = TypeVar("SlackBotEmailT", bound="SlackBotEmail")
class SlackBotEmail:
duplicate_email_count: Dict[str, int] = {}
# Mapping of `bot_id` to final email assigned to the bot.
assigned_email: Dict[str, str] = {}
@classmethod
def get_email(cls: Type[SlackBotEmailT], user_profile: ZerverFieldsT, domain_name: str) -> str:
slack_bot_id = user_profile["bot_id"]
if slack_bot_id in cls.assigned_email:
return cls.assigned_email[slack_bot_id]
if "real_name_normalized" in user_profile:
| slack_bot_name = user_profile["real_name_normalized"]
elif "first_name" in user_profile:
slack_bot_name = user_profile["first_name"]
| else:
raise AssertionError("Could not identify bot type")
email = slack_bot_name.replace("Bot", "").replace(" ", "") + f"-bot@{domain_name}"
if email in cls.duplicate_email_count:
email_prefix, email_suffix = email.split("@")
email_prefix += cls.duplicate_email_count[email]
email = "@".join([email_prefix, email_suffix])
# Increment the duplicate email count
cls.duplicate_email_count[email] += 1
else:
cls.duplicate_email_count[email] = 1
cls.assigned_email[slack_bot_id] = email
return email
def rm_tree(path: str) -> None:
if os.path.exists(path):
shutil.rmtree(path)
def slack_workspace_to_realm(
domain_name: str,
realm_id: int,
user_list: List[ZerverFieldsT],
realm_subdomain: str,
slack_data_dir: str,
custom_emoji_list: ZerverFieldsT,
) -> Tuple[
ZerverFieldsT,
SlackToZulipUserIDT,
SlackToZulipRecipientT,
AddedChannelsT,
AddedMPIMsT,
DMMembersT,
List[ZerverFieldsT],
ZerverFieldsT,
]:
"""
Returns:
1. realm, converted realm data
2. slack_user_id_to_zulip_user_id, which is a dictionary to map from Slack user id to Zulip user id
3. slack_recipient_name_to_zulip_recipient_id, which is a dictionary to map from Slack recipient
name(channel names, mpim names, usernames, etc) to Zulip recipient id
4. added_channels, which is a dictionary to map from channel name to channel id, Zulip stream_id
5. added_mpims, which is a dictionary to map from MPIM name to MPIM id, Zulip huddle_id
6. dm_members, which is a dictionary to map from DM id to tuple of DM participants.
7. avatars, which is list to map avatars to Zulip avatar records.json
8. emoji_url_map, which is maps emoji name to its Slack URL
"""
NOW = float(timezone_now().timestamp())
zerver_realm: List[ZerverFieldsT] = build_zerver_realm(realm_id, realm_subdomain, NOW, "Slack")
realm = build_realm(zerver_realm, realm_id, domain_name)
(
zerver_userprofile,
avatars,
slack_user_id_to_zulip_user_id,
zerver_customprofilefield,
zerver_customprofilefield_value,
) = users_to_zerver_userprofile(slack_data_dir, user_list, realm_id, int(NOW), domain_name)
(
realm,
added_channels,
added_mpims,
dm_members,
slack_recipient_name_to_zulip_recipient_id,
) = channels_to_zerver_stream(
slack_data_dir, realm_id, realm, slack_user_id_to_zulip_user_id, zerver_userprofile
)
zerver_realmemoji, emoji_url_map = build_realmemoji(custom_emoji_list, realm_id)
realm["zerver_realmemoji"] = zerver_realmemoji
# See https://zulip.com/help/set-default-streams-for-new-users
# for documentation on zerver_defaultstream
realm["zerver_userprofile"] = zerver_userprofile
realm["zerver_customprofilefield"] = zerver_customprofilefield
realm["zerver_customprofilefieldvalue"] = zerver_customprofilefield_value
return (
realm,
slack_user_id_to_zulip_user_id,
slack_recipient_name_to_zulip_recipient_id,
added_channels,
added_mpims,
dm_members,
avatars,
emoji_url_map,
)
def build_realmemoji(
custom_emoji_list: ZerverFieldsT, realm_id: int
) -> Tuple[List[ZerverFieldsT], ZerverFieldsT]:
zerver_realmemoji = []
emoji_url_map = {}
emoji_id = 0
for emoji_name, url in custom_emoji_list.items():
if "emoji.slack-edge.com" in url:
# Some of the emojis we get from the API have invalid links
# this is to prevent errors related to them
realmemoji = RealmEmoji(
name=emoji_name, id=emoji_id, file_name=os.path.basename(url), deactivated=False
)
realmemoji_dict = model_to_dict(realmemoji, exclude=["realm", "author"])
realmemoji_dict["author"] = None
realmemoji_dict["realm"] = realm_id
emoji_url_map[emoji_name] = url
zerver_realmemoji.append(realmemoji_dict)
emoji_id += 1
return zerver_realmemoji, emoji_url_map
def users_to_zerver_userprofile(
slack_data_dir: str, users: List[ZerverFieldsT], realm_id: int, timestamp: Any, domain_name: str
) -> Tuple[
List[ZerverFieldsT],
List[ZerverFieldsT],
SlackToZulipUserIDT,
List[ZerverFieldsT],
List[ZerverFieldsT],
]:
"""
Returns:
1. zerver_userprofile, which is a list of user profile
2. avatar_list, which is list to map avatars to Zulip avatard records.json
3. slack_user_id_to_zulip_user_id, which is a dictionary to map from Slack user ID to Zulip
user id
4. zerver_customprofilefield, which is a list of all custom profile fields
5. zerver_customprofilefield_values, which is a list of user profile fields
"""
logging.info("######### IMPORTING USERS STARTED #########\n")
zerver_userprofile = []
zerver_customprofilefield: List[ZerverFieldsT] = []
zerver_customprofilefield_values: List[ZerverFieldsT] = []
avatar_list: List[ZerverFieldsT] = []
slack_user_id_to_zulip_user_id = {}
# The user data we get from the Slack API does not contain custom profile data
# Hence we get it from the Slack zip file
slack_data_file_user_list = get_data_file(slack_data_dir + "/users.json")
slack_user_id_to_custom_profile_fields: ZerverFieldsT = {}
slack_custom_field_name_to_zulip_custom_field_id: ZerverFieldsT = {}
for user in slack_data_file_user_list:
process_slack_custom_fields(user, slack_user_id_to_custom_profile_fields)
# We have only one primary owner in Slack, see link
# https://get.slack.help/hc/en-us/articles/201912948-Owners-and-Administrators
# This is to import the primary owner first from all the users
user_id_count = custom_profile_field_value_id_c |
aroth-arsoft/arsoft-web-ddns | manage.py | Python | gpl-3.0 | 259 | 0 | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arso | ft.web.ddns.settings")
from django.core.management import execute_fr | om_command_line
execute_from_command_line(sys.argv)
|
kyleconroy/brushwagg | scrape.py | Python | mit | 2,745 | 0 | import requests
import json
import os
import string
from lxml import etree
SITE_ROOT = "http://magiccards.info"
SITE_MAP = "http://magiccards.info/sitemap.html"
GET_CARD_ABILITIES = True
def filename_encode(name):
return ''.join([n for n in name if n in string.digits + string.letters])
def parse_sets(url):
html = etree.HTML(requests.get(url).content)
# Select the second table
table = html.findall(".//table")[1]
for header in table.findall(".//h3"):
if header.text in ["Expansions", "Core Sets"]:
ul = header.getnext()
for a in ul.findall(".//a"):
yield a.text, SITE_ROOT + a.attrib["href"]
def parse_cards(url):
html = etree.HTML(requests.get(url).content)
card_set, lang = html.find(".//h1").findtext(".//small").split("/")
tables = html.findall(".//table")
table = tables[3]
cards = []
for tr in table.findall(".//tr")[1:]:
no, name, ctype, mana, rarity, artist, edition = tr.findall(".//td")
card_url = "{}/{}/{}/{}.html".format(
SITE_ROOT, card_set, lang, no.text)
image_url = "{}/scans/{}/{}/{}.jpg".format(
SITE_ROOT, lang, card_set, no.text)
if GET_CARD_ABILITIES:
cardhtml = etree.HTML(requests.get(card | _url).content)
cardTables = cardhtml.findall(".//table")
cardTable = cardTables[3]
abilities = cardTable.find(".//b").text
cards.app | end({
"number": no.text,
"url": card_url,
"image": image_url,
"name": name.find("a").text,
"type": ctype.text,
"mana": mana.text,
"rarity": rarity.text,
"artist": artist.text,
"edition": card_set,
"abilities": abilities,
})
else:
cards.append({
"number": no.text,
"url": card_url,
"image": image_url,
"name": name.find("a").text,
"type": ctype.text,
"mana": mana.text,
"rarity": rarity.text,
"artist": artist.text,
"edition": card_set,
})
return cards
if __name__ == "__main__":
# Make sets directory
if not os.path.isdir("sets"):
os.mkdir("sets")
for name, url in parse_sets(SITE_MAP):
card_set = {
"name": name,
"url": url,
"cards": parse_cards(url),
}
f = open("sets/{}.json".format(filename_encode(name)), "w")
json.dump(card_set, f, sort_keys=True, indent=4)
f.close()
print "Finished set"
|
adam704a/useful-scripts | move_tweets.py | Python | unlicense | 853 | 0.021102 | from pymongo import MongoClient
import dateutil
import dateutil.parser
import datetime
import slack
import slack.chat
slack.api_token = 'SLACK_KEY'
c1 = MongoClient("SOURCE_MONGO_URL")
d1 = c1.mj_tweets
l1 = d1.mj_sample
c2 = MongoClient("DESTINATION_MONGO_URL")
d2 = c2.mj_sample
l2 = d2.mj_sample
sta | rt_time = datetime.datetime(2015, 6, 2)
end_time = datetime.datetime(2015, 6, 24)
counter = 1
slack.chat.post_message('mjsif_pipeline',"Tweet Mover launched.",username="Tweet Mover Utility",icon_emoji=':taxi:')
for t in l1.find( {"created_at" : { "$lte" : end_time, | "$gte": start_time}},{'_id': False} ):
try:
l2.insert(t)
counter += 1
except:
pass
if counter % 10000 == 0:
slack.chat.post_message('mjsif_pipeline',str(counter) + " tweets moved",username="Date Mover Utility",icon_emoji=':taxi:') |
M2IS/vendendo | landpage/urls.py | Python | gpl-2.0 | 166 | 0 | from django.conf.urls import url
fro | m . import views
app_name = 'landpage'
urlpatterns = [
url(r'^$', views.LandPageIn | dex.as_view(), name='landpage-index'),
]
|
tvtsoft/odoo8 | addons/delivery/__init__.py | Python | agpl-3.0 | 170 | 0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and lice | nsing details.
import delivery
import partn | er
import sale
import stock
import models
|
dimagi/rapidsms-contrib-apps-dev | training/admin.py | Python | bsd-3-clause | 215 | 0 | #!/u | sr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.contrib import admin
from .models import *
admin.site.register(MessageInWaiting)
admin.site.register(ResponseInWaiting)
admin.site.regi | ster(Template)
|
cognitect/transit-python | transit/read_handlers.py | Python | apache-2.0 | 3,818 | 0.003667 | ## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from transit import pyversion, transit_types
import uuid
import ctypes
import dateutil.parser
import datetime
import dateutil.tz
from transit.helpers import pairs
from decimal import Decimal
## Read handlers are used by the decoder when parsing/reading in Transit
## data and returning Python objects
class DefaultHandler(object):
@staticmethod
def from_rep(t, v):
return transit_types.TaggedValue(t, v)
class NoneHandler(object):
@staticmethod
def from_rep(_):
return None
class KeywordHandler(object):
@staticmethod
def from_rep(v):
return transit_types.Keyword(v)
class SymbolHandler(object):
@staticmethod
def from_rep(v):
return transit_types.Symbol(v)
class BigDecimalHandler(object):
@staticmethod
def from_rep(v):
return Decimal(v)
class BooleanHandler(object):
@staticmethod
def from_rep(x):
return transit_types.true if x == "t" else transit_types.false
class IntHandler(object):
@staticmethod
def from_rep(v):
return int(v)
class FloatHandler(object):
@staticmethod
def from_rep(v):
return float(v)
class UuidHandler(object):
@staticmethod
def from_rep(u):
"""Given a string, return a UUID object."""
if isinstance(u, pyversion.string_types):
return uuid.UUID(u)
# hack to remove signs
a = ctypes.c_ulong(u[0])
b = ctypes.c_ulong(u[1])
combined = a.value << 64 | b.value
return uuid.UUID(int=combined)
class UriHandler(object):
@staticmethod
def from_rep(u):
return transit_types.URI(u)
class DateHandler(object):
@staticmethod
def from_rep(d):
if isinstance(d, pyversion.int_types):
return DateHandler._convert_timestamp(d)
if "T" in d:
return dateutil.parser.parse(d)
return DateHandler._convert_timestamp(pyversion.long_type(d))
@staticmethod
def _convert_timestamp(ms):
"""Given a timestamp in ms, return a DateTime object."""
return datetime.datetime.fromtimestamp(ms/1000.0, dateutil.tz.tzutc())
if pyversion.PY3:
class BigIntegerHandler(object):
@staticmethod
def from_rep(d):
return int(d)
else:
class BigIntegerHandler(object):
@staticmethod
def from_rep(d):
return long(d)
class LinkHandler(object):
@staticmethod
def from_rep(l):
return transit_types.Link(**l)
class ListHandler(object):
@staticmethod
def from_rep(l):
return l
class SetHandler(object):
@staticmethod
def from_rep(s):
return frozenset(s)
class CmapHandler(object):
@staticmethod
def from_rep(cmap):
ret | urn transit_types.frozendict(pairs(cmap))
class IdentityHandler(object):
@staticmethod
def from_rep(i): |
return i
class SpecialNumbersHandler(object):
@staticmethod
def from_rep(z):
if z == 'NaN':
return float('Nan')
if z == 'INF':
return float('Inf')
if z == '-INF':
return float('-Inf')
raise ValueError("Don't know how to handle: " + str(z) + " as \"z\"")
|
JustasB/MitralSuite | Models/Migliore2014/gidfunc.py | Python | mit | 103 | 0.019417 | import params
def mgid2glom(gid) | :
return int((gid-params.gid_mitral_beg | in)/params.Nmitral_per_glom)
|
vlegoff/tsunami | src/primaires/scripting/actions/detruire_sortie.py | Python | bsd-3-clause | 2,726 | 0.000735 | # | -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyrig | ht notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY teleporterCT, INteleporterCT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action detruire_sortie."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Détruit une sortie d'une salle."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.detruire_sortie, "Salle", "str")
@staticmethod
def detruire_sortie(salle, direction):
"""Détruit une sortie de salle et sa réciproque de la destination.
La direction est à choisir parmi est, ouest, nord, sud, nord-est,
nord-ouest, sud-est, sud-ouest, haut et bas.
"""
try:
direction = salle.sorties.get_nom_long(direction)
except KeyError:
raise ErreurExecution("direction {} inconnue".format(direction))
if not salle.sorties.sortie_existe(direction):
raise ErreurExecution("sortie {} non définie".format(direction))
d_salle = salle.sorties[direction].salle_dest
dir_opposee = salle.sorties.get_nom_oppose(direction)
d_salle.sorties.supprimer_sortie(dir_opposee)
salle.sorties.supprimer_sortie(direction)
|
husarion/django-registration | registration/tests/urls.py | Python | bsd-3-clause | 4,623 | 0.00411 | """
URLs used in the unit tests for django-registration.
You should not attempt to use these URLs in any sort of real or
development environment; instead, use
``registration/backends/default/urls.py``. This URLconf includes those
URLs, and also adds several additional URLs which serve no purpose
other than to test that optional keyword arguments are properly
handled.
"""
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from ..views import activate
from ..views import register
urlpatterns = patterns('',
# Test the 'activate' view with custom template
# name.
url(r'^activate-with-template-name/(?P<activation_key>\w+)/$',
activate,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_template_name'),
# Test the 'activate' view with
# extra_context_argument.
url(r'^activate-extra-context/(?P<activation_key>\w+)/$',
activate,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_extra_context'),
# Test the 'activate' view with success_url argument.
url(r'^activate-with-success-url/(?P<activation_key>\w+)/$',
activate,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_activate_success_url'),
# Test the 'register' view with custom template
# name.
url(r'^register-with-template-name/$',
register,
{'template_name': 'registration/test_template_name.html',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_template_name'),
# Test the'register' view with extra_context
# argument.
url(r'^register-extra-context/$',
register,
{'extra_context': {'foo': 'bar', 'callable': lambda: 'called'},
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_extra_context'),
# Test the 'register' view with custom URL for
# closed registration.
url(r'^register-with-disallowed-url/$',
register,
{'disallowed_url': 'registration_test | _custom_disallowed',
'backend': | 'registration.backends.default.DefaultBackend'},
name='registration_test_register_disallowed_url'),
# Set up a pattern which will correspond to the
# custom 'disallowed_url' above.
url(r'^custom-disallowed/$',
direct_to_template,
{'template': 'registration/registration_closed.html'},
name='registration_test_custom_disallowed'),
# Test the 'register' view with custom redirect
# on successful registration.
url(r'^register-with-success_url/$',
register,
{'success_url': 'registration_test_custom_success_url',
'backend': 'registration.backends.default.DefaultBackend'},
name='registration_test_register_success_url'
),
# Pattern for custom redirect set above.
url(r'^custom-success/$',
direct_to_template,
{'template': 'registration/test_template_name.html'},
name='registration_test_custom_success_url'),
(r'', include('registration.backends.default.urls')),
)
|
jggatc/nexus | interface.py | Python | gpl-3.0 | 4,260 | 0.039437 | from __future__ import division
import env
engine = env.engine
import interphase
interphase.init(engine)
class MatrixInterface(interphase.Interface):
def __init__(self, identity, matrix, control):
self.matrix = matrix
self.control = control
interphase.Interface.__init__(self, identity, position=(self.matrix.width//2,self.matrix.height-50), color=(15,30,50), size=(200,100), control_minsize=(35,35), control_size='auto', control_response=100, moveable=False, position_offset=(0,98), font_color=(50,150,200), scroll_button='vertical')
self.get_control('Activate').set_active(False)
def add_controls(self):
self.add(
identity = 'Control',
control_type = 'function_toggle',
position = (100,90),
size = 'auto_width',
font_color = (125,175,200),
control_list = ['Help','Main'],
link = [['Bootup'],['Guide']],
link_activated = True,
control_outline = True)
self.add(
identity = 'Bootup',
control_type = 'control_toggle',
position = (100,50),
size = 'auto',
font_color = (12 | 5,175,200),
control_list = ['Bootup'],
tip_list = ['Nexus activate'])
self.add(
identity = 'Activate',
control_type = 'control_toggle',
position = (100,50),
size = 'auto',
font_color = (125,175,200),
c | ontrol_list = ['Pause','Activate'],
tip_list = ['Nexus pause','Nexus activate'])
self.add(
identity = 'DataProcessedTop',
control_type = 'label',
position = (60,44),
size = (40,20),
font_color = (125,175,200),
control_list = ['0'],
tip_list = ['Nexus Data Integration'])
self.add(
identity = 'DataProcessed',
control_type = 'label',
position = (60,84),
size = (40,20),
font_color = (125,175,200),
control_list = ['0'],
tip_list = ['Nexus Data Integration'])
self.add(
identity = 'DataProcessedTopLabel',
control_type = 'label',
position = (60,24),
size = (40,20),
font_color = (125,175,200),
control_list = ['Top'],
tip_list = ['Nexus Data Integration'])
self.add(
identity = 'DataProcessedLabel',
control_type = 'label',
position = (60,64),
size = (40,20),
font_color = (125,175,200),
control_list = ['Current'],
tip_list = ['Nexus Data Integration'])
self.add(
identity = 'Guide',
control_type = 'textbox',
position = (100,50),
size = (170,70),
color = (49,57,65),
font_color = (125,175,200),
font_size = 10,
font_type = 'arial',
control_list = [self.control.guide],
label_display = False)
def set_data_processed(self, dataprocessed):
self.get_control('DataProcessed').set_value(str(dataprocessed))
def set_data_processed_top(self, dataprocessed):
self.get_control('DataProcessedTop').set_value(str(dataprocessed))
def reset(self):
self.control.set_panel_display()
self.get_control('Activate').set_active(False)
self.get_control('Bootup').set_active(True)
def update(self):
state = interphase.Interface.update(self)
if state.control:
if state.control == 'Bootup':
self.control.set_panel_display()
state.controls['Bootup'].set_active(False)
state.controls['Activate'].set_active(True)
self.matrix.nexus.initiation_activate()
elif state.control == 'Activate':
if not self.matrix.nexus.initiate:
state.controls[state.control].next()
return
if state.value == 'Pause':
self.matrix.set_active(True)
elif state.value == 'Activate':
self.matrix.set_active(False)
|
SabatierBoris/CecileWebSite | pyramidapp/tests/testexample.py | Python | gpl-2.0 | 407 | 0 | # vim: set fileencoding=utf-8 :
"""
This is a example of a unittest module
"""
import unittest
class TestExample(unitte | st.TestCase):
"""
Test of test
"""
def test_one(self):
"""
This is a example of a unittest
"""
self.assertFalse(False)
def test_two(self):
"""
This is a example of a unittest
"""
self.assertTrue(True)
| |
LukeM12/samba | buildtools/wafsamba/samba_bundled.py | Python | gpl-3.0 | 8,139 | 0.003686 | # functions to support bundled libraries
from Configure import conf
import sys, Logs
from samba_utils import *
def PRIVATE_NAME(bld, name, private_extension, private_library):
'''possibly rename a library to include a bundled extension'''
# we now use the same private name for libraries as the public name.
# see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a
# demonstration that this is the right thing to do
# also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html
return name
def target_in_list(target, lst, default):
for l in lst:
if target == l:
return True
if '!' + target == l:
return False
if l == 'ALL':
return True
if l == 'NONE':
return False
return default
def BUILTIN_LIBRARY(bld, name):
'''return True if a library should be builtin
instead of being built as a shared lib'''
return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False)
Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY
def BUILTIN_DEFAULT(opt, builtins):
'''set a comma separated default list of builtin libraries for this package'''
if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options:
return
Options.options['BUILTIN_LIBRARIES_DEFAULT'] = builtins
Options.Handler.BUILTIN_DEFAULT = BUILTIN_DEFAULT
def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''):
'''set a default private library extension'''
if 'PRIVATE_EXTENSION_DEFAULT' in Options.options:
return
Options.options['PRIVATE_EXTENSION_DEFAULT'] = extension
Options.options['PRIVATE_EXTENSION_EXCEPTION'] = noextension
Options.Handler.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT
def minimum_library_version(conf, libname, default):
'''allow override of mininum system library version'''
minlist = Options.options.MINIMUM_LIBRARY_VERSION
if not minlist:
return default
for m in minlist.split(','):
a = m.split(':')
if len(a) != 2:
Logs.error("Bad syntax for --minimum-library-version of %s" % m)
sys.exit(1)
if a[0] == libname:
return a[1]
return default
@conf
def LIB_MAY_BE_BUNDLED(conf, libname):
if libname in conf.env.BUNDLED_LIBS:
return True
if '!%s' % libname in conf.env.BUNDLED_LIBS:
return False
if 'NONE' in conf.env.BUNDLED_LIBS:
return False
return True
@conf
def LIB_MUST_BE_BUNDLED(conf, libname):
if libname in conf.env.BUNDLED_LIBS:
return True
if '!%s' % libname in conf.env.BUNDLED_LIBS:
return False
if 'ALL' in conf.env.BUNDLED_LIBS:
return True
return False
@conf
def LIB_MUST_BE_PRIVATE(conf, libname):
return ('ALL' in conf.env.PRIVATE_LIBS or
libna | me in conf.env.PRIVAT | E_LIBS)
@conf
def CHECK_PREREQUISITES(conf, prereqs):
missing = []
for syslib in TO_LIST(prereqs):
f = 'FOUND_SYSTEMLIB_%s' % syslib
if not f in conf.env:
missing.append(syslib)
return missing
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0',
onlyif=None, implied_deps=None, pkg=None):
'''check if a library is available as a system library.
This only tries using pkg-config
'''
return conf.CHECK_BUNDLED_SYSTEM(libname,
minversion=minversion,
onlyif=onlyif,
implied_deps=implied_deps,
pkg=pkg)
@runonce
@conf
def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0',
checkfunctions=None, headers=None, checkcode=None,
onlyif=None, implied_deps=None,
require_headers=True, pkg=None):
'''check if a library is available as a system library.
this first tries via pkg-config, then if that fails
tries by testing for a specified function in the specified lib
'''
if conf.LIB_MUST_BE_BUNDLED(libname):
return False
found = 'FOUND_SYSTEMLIB_%s' % libname
if found in conf.env:
return conf.env[found]
def check_functions_headers_code():
'''helper function for CHECK_BUNDLED_SYSTEM'''
if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname):
return False
if checkfunctions is not None:
ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers,
empty_decl=False, set_target=False)
if not ok:
return False
if checkcode is not None:
define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper()
ok = conf.CHECK_CODE(checkcode, lib=libname,
headers=headers, local_include=False,
msg=msg, define=define)
conf.CONFIG_RESET(define)
if not ok:
return False
return True
# see if the library should only use a system version if another dependent
# system version is found. That prevents possible use of mixed library
# versions
if onlyif:
missing = conf.CHECK_PREREQUISITES(onlyif)
if missing:
if not conf.LIB_MAY_BE_BUNDLED(libname):
Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing))
sys.exit(1)
conf.env[found] = False
return False
minversion = minimum_library_version(conf, libname, minversion)
msg = 'Checking for system %s' % libname
if minversion != '0.0.0':
msg += ' >= %s' % minversion
uselib_store=libname.upper()
if pkg is None:
pkg = libname
# try pkgconfig first
if (conf.check_cfg(package=pkg,
args='"%s >= %s" --cflags --libs' % (pkg, minversion),
msg=msg, uselib_store=uselib_store) and
check_functions_headers_code()):
conf.SET_TARGET_TYPE(libname, 'SYSLIB')
conf.env[found] = True
if implied_deps:
conf.SET_SYSLIB_DEPS(libname, implied_deps)
return True
if checkfunctions is not None:
if check_functions_headers_code():
conf.env[found] = True
if implied_deps:
conf.SET_SYSLIB_DEPS(libname, implied_deps)
conf.SET_TARGET_TYPE(libname, 'SYSLIB')
return True
conf.env[found] = False
if not conf.LIB_MAY_BE_BUNDLED(libname):
Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion))
sys.exit(1)
return False
def tuplize_version(version):
return tuple([int(x) for x in version.split(".")])
@runonce
@conf
def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'):
'''check if a python module is available on the system and
has the specified minimum version.
'''
if conf.LIB_MUST_BE_BUNDLED(libname):
return False
# see if the library should only use a system version if another dependent
# system version is found. That prevents possible use of mixed library
# versions
minversion = minimum_library_version(conf, libname, minversion)
try:
m = __import__(modulename)
except ImportError:
found = False
else:
try:
version = m.__version__
except AttributeError:
found = False
else:
found = tuplize_version(version) >= tuplize_version(minversion)
if not found and not conf.LIB_MAY_BE_BUNDLED(libname):
Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion))
sys.exit(1)
return found
def NONSHARED_BINARY(bld, name):
'''return True if a binary should be built without non-system shared libs'''
return target_in_list(name, bld.env.NONSHARED_BINARIES, False)
Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY
|
Pardus-Ahtapot/GDYS | ahtapot-gkts/var/opt/ahtapot-gkts/developer/rule/models.py | Python | gpl-3.0 | 1,975 | 0.006643 | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class Rule(mo | dels.Model):
source = models.CharField(max_length=80, verbose_name="Kaynak", help_text="Kaynak IP veya FQDN Bilgisi Giriniz(Tek)")
destination = models.CharField(max_length=80, verbose_name="Hedef", help_text="Hedef IP veya FQDN Bilgisi Giriniz(Tek)")
destination_port = models.IntegerField(verbose_name= | "Hedef Port", help_text=u"Hedef Port Numarasını Giriniz(Tek)")
protocol = models.CharField(max_length=80, verbose_name="Protokol", help_text=u"Protokol Bilgisini Giriniz(Tek)")
time = models.IntegerField(verbose_name=u"Süre", help_text=u"Aktif Olacağı Süreyi Dakika Türünden Giriniz(Tek)")
developer = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name=u"Geliştirici")
fqdn = models.CharField(max_length=80, verbose_name=u"Güvenlik Duvarı", help_text=u"IP veya FQDN Giriniz(Tek)")
definition = models.CharField(max_length=255, verbose_name=u"Açıklama", help_text=u"Maksimum 255 Karakter")
def __str__(self):
return self.fqdn + " |**| " + self.developer.username + " |**| " + self.source + " |**| " + self.destination +\
" |**| " + str(self.destination_port)
class Meta:
verbose_name = "Kural"
verbose_name_plural = "Kurallar"
class ActivatedRule(models.Model):
rule = models.ForeignKey(Rule, verbose_name="Kural")
rule_cmd = models.CharField(max_length=255, verbose_name="Kural Komutu")
operated_at = models.DateTimeField(default=datetime.now(), verbose_name=u"Oluşturulma Tarihi")
active_until = models.DateTimeField(default=datetime.now(), verbose_name=u"Aktif Kalacağı Zaman")
def __str__(self):
return self.rule
class Meta:
verbose_name = u"Aktif Edilmiş Kural"
verbose_name_plural = u"Aktif Edilmiş Kurallar"
|
mordred-descriptor/mordred | mordred/FragmentComplexity.py | Python | bsd-3-clause | 978 | 0 | from __future__ import division
from ._base import De | scriptor
__all__ = ("FragmentComplexity",)
class FragmentComplexity(Descriptor):
r"""fragment complexity descriptor.
.. math::
{\rm fragCpx} = \left| B^2 - A^2 + A \right| + \frac{H}{100}
where
:math:`A` is number of atoms,
:math:`B` is number of bonds,
:math:`H` is number of hetero atoms
References
* :doi:`10.1021/ci050521b`
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "fragment complexity"
| @classmethod
def preset(cls, version):
yield cls()
explicit_hydrogens = False
def parameters(self):
return ()
def __str__(self):
return "fragCpx"
def calculate(self):
A = self.mol.GetNumAtoms()
B = self.mol.GetNumBonds()
H = sum(1 for a in self.mol.GetAtoms() if a.GetAtomicNum() != 6)
return abs(B ** 2 - A ** 2 + A) + H / 100
rtype = float
|
GhalebKhaled/fb-bot-test | settings/local.py | Python | apache-2.0 | 105 | 0 | from __f | uture__ import unicode_literals
from . | base import *
GI_APPLICATION = u'wsgi.local.application'
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/affiliation_group_v30_rc1_invited_position_summary_v30_rc1.py | Python | mit | 5,818 | 0.000516 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.external_i_ds_v30_rc1 import ExternalIDsV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.invited_position_summary_v30_rc1 import InvitedPositionSummaryV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
class AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30Rc1',
'external_ids': 'ExternalIDsV30Rc1',
'summaries': 'list[InvitedPositionSummaryV30Rc1]'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'external_ids': 'external-ids',
'summaries': 'summaries'
}
def __init__(self, last_modified_date=None, external_ids=None, summaries=None): # noqa: E501
"""AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._external_ids = None
self._summaries = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if external_ids is not None:
self.external_ids = external_ids
if summaries is not None:
self.summaries = summaries
@property
def last_modified_date(self):
"""Gets the last_modified_date of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:return: The last_modified_date of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:rtype: LastModifiedDateV30Rc1
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1.
:param last_modified_date: The last_modified_date of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:type: LastModifiedDateV30Rc1
"""
self._last_modified_date = last_modified_date
@property
def external_ids(self):
"""Gets the external_ids of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:return: The external_ids of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:rtype: ExternalIDsV30Rc1
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1.
:param external_ids: The external_ids of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:type: ExternalIDsV30Rc1
"""
self._external_ids = external_ids
@property
def summaries(self):
"""Gets the summaries of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:return: The summaries of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:rtype: list[InvitedPositionSummaryV30Rc1]
"""
return self._summaries
@summaries.setter
def summaries(self, summaries):
"""Sets the summaries of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1.
:param summaries: The summaries of this AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1. # noqa: E501
:type: list[InvitedPositionSummaryV30Rc1]
"""
self._summaries = summaries
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AffiliationGroupV30Rc1InvitedPositionSu | mmaryV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self. | to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AffiliationGroupV30Rc1InvitedPositionSummaryV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
Xiaomi2008/Caffe_3D_FF | tools/convert_mat2levelDB.py | Python | bsd-2-clause | 2,667 | 0.032996 | import sys, getopt
sys.path.insert(0, '/home/tzeng/space/Caffe_3D_FF/trunk/python')
#sys.path.insert(0,'/home/tzeng/autoGenelable_multi_lables_proj/code/py-leveldb-read-only/build/lib.linux-x86_64-2.7')
import numpy as np
import hdf5storage
import leveldb
from leveldb import WriteBatch, LevelDB
import os
import imp
#import caffe
#sys.path.append('/home/tzeng/caffe_3d/python/caffe')
#foo = imp.load_source('caffe.io', '/home/tzeng/caffe_3d/python/caffe/__init__.py')
import caffe.io
from caffe.proto impor | t caffe_pb2
print os.path.dirname(caffe_pb2.__file__)
#from caffe.proto import caffe_pb2
mat_file ='/home/tzeng/space/SegEM_project/data/train_segem.mat'
#mat_file ='/home/tzeng/caffe_flx_kernel | /data/snems3d_train_RF8.mat'
#mat_file ='/home/tzeng/caffe_flx_kernel/data/snems3d_test_pad_2_47_47.mat'
#mat_file= '/home/tzeng/caffe_flx_kernel/data/snems3d_train_RF8_20Percent.mat'
#mat_file= '/home/tzeng/caffe_flx_kernel/data/snems3d_predict_norm.mat'
#snems3d_train_pad_4_47_47.mat'
#mat_file ='/home/tzeng/caffe_3d/data/snems3d_test_pad25.mat'
#mat_file ='/home/tzeng/caffe_3d/data/test'
out =hdf5storage.loadmat(mat_file,format='7.3')
#print len(out)
size = out['data'].shape;
size=size[1];
print size
k=1
#db_path_data='/home/tzeng/caffe_3d/data/mri_test_pad'
db_path_data='/home/tzeng/space/SegEM_project/data/train_segem'
#db_path_data='/home/tzeng/caffe_3d/data/snems3d_train_pad25'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_train_pad_4_47_47_rotations_hFlip'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_predict_norm'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_test_norm'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_test_pad_2_47_47_FlipRatation'
#snems3d_test_submit_pad25'
db_data_lb=leveldb.LevelDB(db_path_data, create_if_missing=True, error_if_exists=False)
batch = leveldb.WriteBatch()
for k in range(size):
p =out['data'][0,k]
#l =out['labels'][0,k]
elm_l=out['elm_labels'][0,k]
#print p
dim_3d=p.shape
print(dim_3d)
dim_4d=[1]
#print p[:,32,20]
for i in (dim_3d):
dim_4d.append(i)
d=np.reshape(p,dim_4d).astype('uint8')
print " max =%d min =%d" %(d.max(), d.min())
elm_d=np.reshape(elm_l,dim_4d).astype('float')
print d.shape
#labels=[l.astype(int).tolist()]
#print type(labels[0])
#print labels
#datum= caffe.io.array_to_datum(d,labels)
datum=caffe.io.elemetwise_array_to_datum(d, elm_d)
db_data_lb.Put('%08d' % k, datum.SerializeToString())
#datum = caffe_pb2.Datum()
#print datum.label.extend([1,2,3,4,5])
#print datum.label;
print(k)
db_data_lb.Write(batch)
#datum.clear_float_data();
|
tonyli71/designate | designate/tests/test_api/test_admin/extensions/test_reports.py | Python | apache-2.0 | 5,116 | 0 | # coding=utf-8
# COPYRIGHT 2015 Rackspace
#
# Author: Betsy Luzader <betsy.luzader@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_admin import AdminApiTestCase
cfg.CONF.import_opt('enabled_extensions_admin', 'designate.api.admin',
group='service:api')
class AdminApiReportsTest(AdminApiTestCase):
def setUp(self):
self.config(enabled_extensions_admin=['reports'], group='service:api')
super(AdminApiReportsTest, self).setUp()
def test_get_counts(self):
self.policy({'count_tenants': '@'})
response = self.client.get('/reports/counts')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('counts', response.json)
self.assertIn('zones', response.json['counts'][0])
self.assertIn('rec | ords', response.json['counts'][0])
self.assertIn('tenants', response.json['counts'][0])
# Assert that they are all equal to 0
self.assertEqual(0, response.json['counts'][0]['zones'])
self.assertEqual(0, response.json['counts'][0]['records'])
self.assertEqual(0, response.json['counts'][0]['tenants'])
# Add | a domain and check the counts
self.create_domain()
response = self.client.get('/reports/counts')
# Should be one domain
self.assertEqual(1, response.json['counts'][0]['zones'])
# Should be 1 NS and 1 SOA records
self.assertEqual(2, response.json['counts'][0]['records'])
# Should be 1 tenant
self.assertEqual(1, response.json['counts'][0]['tenants'])
def test_get_counts_zones(self):
self.policy({'count_domains': '@'})
response = self.client.get('/reports/counts/zones')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('counts', response.json)
self.assertIn('zones', response.json['counts'][0])
self.assertEqual(0, response.json['counts'][0]['zones'])
# Create 2 domains
self.create_domain(fixture=0)
self.create_domain(fixture=1)
response = self.client.get('/reports/counts/zones')
self.assertEqual(2, response.json['counts'][0]['zones'])
def test_get_counts_records(self):
self.policy({'count_records': '@'})
response = self.client.get('/reports/counts/records')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('counts', response.json)
self.assertIn('records', response.json['counts'][0])
self.assertEqual(0, response.json['counts'][0]['records'])
# Create a domain
self.create_domain()
response = self.client.get('/reports/counts/records')
# Should have 1 NS and 1 SOA record
self.assertEqual(2, response.json['counts'][0]['records'])
def test_get_counts_tenants(self):
self.policy({'count_tenants': '@'})
response = self.client.get('/reports/counts/tenants')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('counts', response.json)
self.assertIn('tenants', response.json['counts'][0])
self.assertEqual(0, response.json['counts'][0]['tenants'])
# Create a domain
self.create_domain()
response = self.client.get('/reports/counts/tenants')
# Should have 1 tenant
self.assertEqual(1, response.json['counts'][0]['tenants'])
def test_get_tenants(self):
self.policy({'find_tenants': '@'})
self.create_domain()
response = self.client.get('/reports/tenants')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('tenants', response.json)
self.assertIn('zone_count', response.json['tenants'][0])
self.assertEqual(1, response.json['tenants'][0]['zone_count'])
def test_get_tenant(self):
self.policy({'find_tenants': '@'})
domain = self.create_domain()
tenant = domain.tenant_id
response = self.client.get('/reports/tenants/%s' % tenant)
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('zones', response.json)
self.assertIn('example.com.', response.json['zones'])
|
zeldin/libsigrokdecode | decoders/spiflash/pd.py | Python | gpl-3.0 | 19,082 | 0.002515 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011-2016 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from .lists import *
L = len(cmds)
# Don't forget to keep this in sync with 'cmds' is lists.py.
class Ann:
WRSR, PP, READ, WRDI, RDSR, WREN, FAST_READ, SE, RDSCUR, WRSCUR, \
RDSR2, CE, ESRY, DSRY, REMS, RDID, RDP_RES, CP, ENSO, DP, READ2X, \
EXSO, CE2, BE, REMS2, \
BIT, FIELD, WARN = range(L + 3)
def cmd_annotation_classes():
return tuple([tuple([cmd[0].lower(), cmd[1]]) for cmd in cmds.values()])
def decode_dual_bytes(sio0, sio1):
# Given a byte in SIO0 (MOSI) of even bits and a byte in
# SIO1 (MISO) of odd bits, return a tuple of two bytes.
def combine_byte(even, odd):
result = 0
for bit in range(4):
if even & (1 << bit):
result |= 1 << (bit*2)
if odd & (1 << bit):
result |= 1 << ((bit*2) + 1)
return result
return (combine_byte(sio0 >> 4, sio1 >> 4), combine_byte(sio0, sio1))
def decode_status_reg(data):
# TODO: Additional per-bit(s) self.put() calls with correct start/end.
# Bits[0:0]: WIP (write in progress)
s = 'W' if (data & (1 << 0)) else 'No w'
ret = '%srite operation in progress.\n' % s
# Bits[1:1]: WEL (write enable latch)
s = '' if (data & (1 << 1)) else 'not '
ret += 'Internal write enable latch is %sset.\n' % s
# Bits[5:2]: Block protect bits
# TODO: More detailed decoding (chip-dependent).
ret += 'Block protection bits (BP3-BP0): 0x%x.\n' % ((data & 0x3c) >> 2)
# Bits[6:6]: Continuously program mode (CP mode)
s = '' if (data & (1 << 6)) else 'not '
ret += 'Device is %sin continuously program mode (CP mode).\n' % s
# Bits[7:7]: SRWD (status register write disable)
s = 'not ' if (data & (1 << 7)) else ''
ret += 'Status register writes are %sallowed.\n' % s
return ret
class Decoder(srd.Decoder):
api_version = 2
id = 'spiflash'
name = 'SPI flash'
longname = 'SPI flash chips'
desc = 'xx25 series SPI (NOR) flash chip protocol.'
license = 'gplv2+'
inputs = ['spi']
outputs = ['spiflash']
annotations = cmd_annotation_classes() + (
('bit', 'Bit'),
('field', 'Field'),
('warning', 'Warning'),
)
annotation_rows = (
('bits', 'Bits', (L + 0,)),
('fields', 'Fields', (L + 1,)),
('commands', 'Commands', tuple(range(len(cmds)))),
('warnings', 'Warnings', (L + 2,)),
)
options = (
{'id': 'chip', 'desc': 'Chip', 'default': tuple(chips.keys())[0],
'values': tuple(chips.keys())},
{'id': 'format', 'desc': 'Data format', 'default': 'hex',
'values': ('hex', 'ascii')},
)
def __init__(self):
self.device_id = -1
self.on_end_transaction = None
| self.end_current_transaction()
# Build dict mapping command keys to handler functions. Each
# command in 'cmds' (defined in lists.py) has a | matching
# handler self.handle_<shortname>.
def get_handler(cmd):
s = 'handle_%s' % cmds[cmd][0].lower().replace('/', '_')
return getattr(self, s)
self.cmd_handlers = dict((cmd, get_handler(cmd)) for cmd in cmds.keys())
def end_current_transaction(self):
if self.on_end_transaction is not None: # Callback for CS# transition.
self.on_end_transaction()
self.on_end_transaction = None
self.state = None
self.cmdstate = 1
self.addr = 0
self.data = []
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.chip = chips[self.options['chip']]
self.vendor = self.options['chip'].split('_')[0]
def putx(self, data):
# Simplification, most annotations span exactly one SPI byte/packet.
self.put(self.ss, self.es, self.out_ann, data)
def putf(self, data):
self.put(self.ss_field, self.es_field, self.out_ann, data)
def putc(self, data):
self.put(self.ss_cmd, self.es_cmd, self.out_ann, data)
def device(self):
return device_name[self.vendor].get(self.device_id, 'Unknown')
def vendor_device(self):
return '%s %s' % (self.chip['vendor'], self.device())
def cmd_ann_list(self):
x, s = cmds[self.state][0], cmds[self.state][1]
return ['Command: %s (%s)' % (s, x), 'Command: %s' % s,
'Cmd: %s' % s, 'Cmd: %s' % x, x]
def cmd_vendor_dev_list(self):
c, d = cmds[self.state], 'Device = %s' % self.vendor_device()
return ['%s (%s): %s' % (c[1], c[0], d), '%s: %s' % (c[1], d),
'%s: %s' % (c[0], d), d, self.vendor_device()]
def emit_cmd_byte(self):
self.ss_cmd = self.ss
self.putx([Ann.FIELD, self.cmd_ann_list()])
self.addr = 0
def emit_addr_bytes(self, mosi):
self.addr |= (mosi << ((4 - self.cmdstate) * 8))
b = ((3 - (self.cmdstate - 2)) * 8) - 1
self.putx([Ann.BIT,
['Address bits %d..%d: 0x%02x' % (b, b - 7, mosi),
'Addr bits %d..%d: 0x%02x' % (b, b - 7, mosi),
'Addr bits %d..%d' % (b, b - 7), 'A%d..A%d' % (b, b - 7)]])
if self.cmdstate == 2:
self.ss_field = self.ss
if self.cmdstate == 4:
self.es_field = self.es
self.putf([Ann.FIELD, ['Address: 0x%06x' % self.addr,
'Addr: 0x%06x' % self.addr, '0x%06x' % self.addr]])
def handle_wren(self, mosi, miso):
self.putx([Ann.WREN, self.cmd_ann_list()])
self.state = None
def handle_wrdi(self, mosi, miso):
pass # TODO
def handle_rdid(self, mosi, miso):
if self.cmdstate == 1:
# Byte 1: Master sends command ID.
self.emit_cmd_byte()
elif self.cmdstate == 2:
# Byte 2: Slave sends the JEDEC manufacturer ID.
self.putx([Ann.FIELD, ['Manufacturer ID: 0x%02x' % miso]])
elif self.cmdstate == 3:
# Byte 3: Slave sends the memory type.
self.putx([Ann.FIELD, ['Memory type: 0x%02x' % miso]])
elif self.cmdstate == 4:
# Byte 4: Slave sends the device ID.
self.device_id = miso
self.putx([Ann.FIELD, ['Device ID: 0x%02x' % miso]])
if self.cmdstate == 4:
self.es_cmd = self.es
self.putc([Ann.RDID, self.cmd_vendor_dev_list()])
self.state = None
else:
self.cmdstate += 1
def handle_rdsr(self, mosi, miso):
# Read status register: Master asserts CS#, sends RDSR command,
# reads status register byte. If CS# is kept asserted, the status
# register can be read continuously / multiple times in a row.
# When done, the master de-asserts CS# again.
if self.cmdstate == 1:
# Byte 1: Master sends command ID.
self.emit_cmd_byte()
elif self.cmdstate >= 2:
# Bytes 2-x: Slave sends status register as long as master clocks.
self.es_cmd = self.es
self.putx([Ann.BIT, [decode_status_reg(miso)]])
self.putx([Ann.FIELD, ['Status register']])
self.putc([Ann.RDSR, self.cmd_ann_list()])
self.cmdstate += 1
def handle_rdsr2(self, mosi, miso):
# Read status register 2: Master asserts CS#, sends RDSR2 command,
# reads status register 2 byte. If CS# is kept asserted, the status
# re |
PapenfussLab/Srtools | bin/old/loadSolexa.py | Python | artistic-2.0 | 796 | 0.007538 | #!/usr/bin/env python
"""
loadSolexa.py
Author: Tony Papenfuss
Date: Tue Jun 24 14:27:34 EST 2008
| """
import os, sys
from maq import *
from useful import progressMessage
oFilename = 'tmp/PlatySolexa.txt'
if not os.path.exists(oFilename):
oFile = open(oFilename, 'w')
dataDir = '/Users/papenfuss/databases/platypus/venom/solexa/'
for i,read in enumerate(MaqViewFile(os.path.join(dataDir, 'mapview.txt'), mQ_cutoff=40)):
| if (i % 1000)==0:
progressMessage("# maq %s", i, 28395347)
tokens = str(read).split('\t')
tokens.append(i)
print >> oFile, "|".join([str(x) for x in tokens])
oFile.close()
progressMessage("# maq %s\n", i, 28395347)
os.system("""sqlite3 alignedReads.db '.import "tmp/PlatySolexa.txt" PlatySolexa'""")
|
Spiderlover/Toontown | toontown/parties/DistributedPartyJukeboxActivityAI.py | Python | mit | 549 | 0.010929 | from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPar | tyJukeboxActivityBaseAI import DistributedPartyJukeboxActivityBaseAI
import PartyGlobals
class DistributedPartyJukeboxActivityAI(DistributedPartyJukeboxActivityBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPartyJukeboxActivityAI")
| def __init__(self, air, parent, activityTuple):
DistributedPartyJukeboxActivityBaseAI.__init__(self, air, parent, activityTuple)
self.music = PartyGlobals.PhaseToMusicData
|
be-cloud-be/horizon-addons | server-tools/base_export_manager/models/ir_exports.py | Python | agpl-3.0 | 2,147 | 0 | # -*- coding: utf-8 -*-
# Copyright 2015-2016 Jairo Llopis <jairo.llopis@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, api, fields, models
from openerp.exceptions import ValidationError
class IrExports(models.Model):
_inherit = 'ir.exports'
name = fields.Char(required=True)
resource = fields.Char(
required=False,
readonly=True,
help="Model's technical name.")
model_id = fields.Many2one(
"ir.model",
"Model",
store=True,
domain=[("transient", "=", False)],
compute="_compute_model_id",
inverse="_inverse_model_id",
help="Database model to export.")
@api.multi
@api.depends("resource")
def _compute_model_id(self):
"""Get the model from the resource."""
for s in self:
s.model_id = self._get_model_id(s.resource)
@api.multi
@api.onchange("model_id")
def _inverse_model_id(self):
"""Get the resource from the model."""
for s in self:
s.resource = s.model_id.model
@api.multi
@api.onchange("resource")
def _onchange_resource(self):
"""Void fields if model is changed in a view."""
for s in self:
s.export_fields = False
@api.model
def _get_model_id(self, resource):
"""Return a model object from its technical name.
:param str resource:
Technical name of the model, like ``ir.model``.
"""
return self.env["ir.model"].search([("model", "=", resource)])
@ | api.model
def create(self, vals):
"""Check required values when creating the record.
Odoo's export dialog populates ``resource``, while this module's new
form populates ``model_id``. At least one of them is required to
trigger the methods that fill up the other, so this should fail if
| one is missing.
"""
if not any(f in vals for f in {"model_id", "resource"}):
raise ValidationError(_("You must supply a model or resource."))
return super(IrExports, self).create(vals)
|
obulpathi/bitcoinpy | bitcoinpy/mempool.py | Python | mit | 952 | 0.026261 | # MemPool.py
#
# Distributed under the MIT/X11 software license | , see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import logging
from lib.serialize import uint256_to_shortstr
class MemPool(object):
def __init__(self):
self.pool = | {}
# setup logging
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
def add(self, tx):
tx.calc_sha256()
hash = tx.sha256
hashstr = uint256_to_shortstr(hash)
if hash in self.pool:
self.log.write("MemPool.add(%s): already known" % (hashstr,))
return False
if not tx.is_valid():
self.log.write("MemPool.add(%s): invalid TX" % (hashstr, ))
return False
self.pool[hash] = tx
self.log.write("MemPool.add(%s), poolsz %d" % (hashstr, len(self.pool)))
return True
def remove(self, hash):
if hash not in self.pool:
return False
del self.pool[hash]
return True
def size(self):
return len(self.pool)
|
alexmogavero/home-assistant | homeassistant/components/device_tracker/unifi.py | Python | apache-2.0 | 3,400 | 0 | """
Support for Unifi WAP controllers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.unifi/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.loader as loader
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.const import CONF_VERIFY_SSL
REQUIREMENTS = ['pyunifi==2.13']
_LOGGER = logging.getLogger(__name__)
CONF_PORT = 'port'
CONF_SITE_ID = 'site_id'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8443
DEFAULT_VERIFY_SSL = True
NOTIFICATION_ID = 'unifi_notification'
NOTIFICATION_TITLE = 'Unifi Device Tracker Setup'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_SITE_ID, default='default'): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
def get_scanner(hass, config):
"""Set up the Unifi device_tracker."""
from pyunifi.controller import Controller, APIError
host = config[DOMAIN].get(CONF_HOST)
username = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
site_id = config[DOMAIN].get(CONF_SITE_ID)
port = config[DOMAIN].get(CONF_PORT)
verify_ssl = config[DOMAIN].get(CONF_VERIFY_SSL)
persistent_notification = loader.get_component('persistent_n | otification')
try:
ctrl = Controller(host, username, password, port, version='v4',
site_id=site_id, ssl_verify=verify_ssl)
except APIError as ex:
_LOGGER.error("Failed to connect to Unifi: %s", ex)
| persistent_notification.create(
hass, 'Failed to connect to Unifi. '
'Error: {}<br />'
'You will need to restart hass after fixing.'
''.format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
return UnifiScanner(ctrl)
class UnifiScanner(DeviceScanner):
"""Provide device_tracker support from Unifi WAP client data."""
def __init__(self, controller):
"""Initialize the scanner."""
self._controller = controller
self._update()
def _update(self):
"""Get the clients from the device."""
from pyunifi.controller import APIError
try:
clients = self._controller.get_clients()
except APIError as ex:
_LOGGER.error("Failed to scan clients: %s", ex)
clients = []
self._clients = {client['mac']: client for client in clients}
def scan_devices(self):
"""Scan for devices."""
self._update()
return self._clients.keys()
def get_device_name(self, mac):
"""Return the name (if known) of the device.
If a name has been set in Unifi, then return that, else
return the hostname if it has been detected.
"""
client = self._clients.get(mac, {})
name = client.get('name') or client.get('hostname')
_LOGGER.debug("Device %s name %s", mac, name)
return name
|
nutils/nutils | nutils/topology.py | Python | mit | 101,558 | 0.014435 | # Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The topology module defines the topology objects, notably the
:class:`StructuredTopology` and :class:`UnstructuredTopology`. Maintaining
strict separation of topological and geometrical information, the topology
represents a set of elements and their interconnectivity, boundaries,
refinements, subtopologies etc, but not their positioning in physical space. The
dimension of the topology represe | nts the dimension of its elements, not that of
the the space they are embedded in.
The primary role of topologies is to form a domain | for :mod:`nutils.function`
objects, like the geometry function and function bases for analysis, as well as
provide tools for their construction. It also offers methods for integration and
sampling, thus providing a high level interface to operations otherwise written
out in element loops. For lower level operations topologies can be used as
:mod:`nutils.element` iterators.
"""
from . import element, elementseq, function, util, parallel, config, numeric, cache, transform, transformseq, warnings, matrix, types, sample, points, _
import numpy, functools, collections.abc, itertools, functools, operator, numbers, pathlib, treelog as log, abc
_identity = lambda x: x
class Topology(types.Singleton):
'topology base class'
__slots__ = 'references', 'transforms', 'opposites', 'ndims'
__cache__ = 'border_transforms', 'simplex', 'boundary', 'interfaces'
@types.apply_annotations
def __init__(self, references:elementseq.strictreferences, transforms:transformseq.stricttransforms, opposites:transformseq.stricttransforms):
assert references.ndims == opposites.fromdims == transforms.fromdims
assert len(references) == len(transforms) == len(opposites)
self.references = references
self.transforms = transforms
self.opposites = opposites
self.ndims = transforms.fromdims
super().__init__()
def __str__(self):
'string representation'
return '{}(#{})'.format(self.__class__.__name__, len(self))
def __len__(self):
return len(self.references)
@property
def elements(self):
warnings.deprecation('Topology.elements is deprecated, use Topology.references and Topology.transforms instead')
return tuple(self)
def __iter__(self):
warnings.deprecation('Topology.__iter__ is deprecated, use Topology.references and Topology.transforms instead')
return iter(map(element.Element, self.references, self.transforms, self.opposites))
def getitem(self, item):
return EmptyTopology(self.ndims)
def __getitem__(self, item):
if numeric.isintarray(item):
item = types.frozenarray(item)
return UnstructuredTopology(self.references[item], self.transforms[item], self.opposites[item])
if not isinstance(item, tuple):
item = item,
if all(it in (...,slice(None)) for it in item):
return self
topo = self.getitem(item) if len(item) != 1 or not isinstance(item[0],str) \
else functools.reduce(operator.or_, map(self.getitem, item[0].split(',')), EmptyTopology(self.ndims))
if not topo:
raise KeyError(item)
return topo
def __invert__(self):
return OppositeTopology(self)
def __or__(self, other):
assert isinstance(other, Topology) and other.ndims == self.ndims
return other if not self \
else self if not other \
else NotImplemented if isinstance(other, UnionTopology) \
else UnionTopology((self,other))
__ror__ = lambda self, other: self.__or__(other)
def __and__(self, other):
keep_self = numpy.array(list(map(other.transforms.contains_with_tail, self.transforms)), dtype=bool)
if keep_self.all():
return self
keep_other = numpy.array(list(map(self.transforms.contains_with_tail, other.transforms)), dtype=bool)
if keep_other.all():
return other
ind_self = types.frozenarray(keep_self.nonzero()[0], copy=False)
ind_other = types.frozenarray([i for i, trans in enumerate(other.transforms) if keep_other[i] and not self.transforms.contains(trans)], dtype=int)
# The last condition is to avoid duplicate elements. Note that we could
# have reused the result of an earlier lookup to avoid a new (using index
# instead of contains) but we choose to trade some speed for simplicity.
references = elementseq.chain([self.references[ind_self], other.references[ind_other]], self.ndims)
transforms = transformseq.chain([self.transforms[ind_self], other.transforms[ind_other]], self.ndims)
opposites = transformseq.chain([self.opposites[ind_self], other.opposites[ind_other]], self.ndims)
return UnstructuredTopology(references, transforms, opposites, ndims=self.ndims)
__rand__ = lambda self, other: self.__and__(other)
def __add__(self, other):
return self | other
def __contains__(self, element):
warnings.deprecation("Topology.__contains__ is deprecated, use 'elem.transform in topo.transforms' instead")
try:
ielem = self.transforms.index(element.transform)
except ValueError:
return False
return self.references[ielem] == element.reference and self.opposites[ielem] == element.opposite
def __sub__(self, other):
assert isinstance(other, Topology) and other.ndims == self.ndims
return other.__rsub__(self)
def __rsub__(self, other):
assert isinstance(other, Topology) and other.ndims == self.ndims
return other - other.subset(self, newboundary=getattr(self,'boundary',None))
def __mul__(self, other):
return ProductTopology(self, other)
@property
def edict(self):
'''transform -> ielement mapping'''
warnings.deprecation('edict is deprecated, use Topology.transforms.index instead')
return {trans: i for i, trans in enumerate(self.transforms)}
@property
def border_transforms(self):
indices = set()
for btrans in self.boundary.transforms:
try:
ielem, tail = self.transforms.index_with_tail(btrans)
except ValueError:
pass
else:
indices.add(ielem)
return self.transforms[numpy.array(sorted(indices), dtype=int)]
@property
def refine_iter(self):
topo = self
while True:
yield topo
topo = topo.refined
def basis(self, name, *args, **kwargs):
'''
Create a basis.
'''
if self.ndims == 0:
return function.PlainBasis([[1]], [[0]], 1, self.transforms)
split = name.split('-', 1)
if len(split) == 2 and split[0] in ('h', 'th'):
name = split[1] # default to non-hierarchical bases
if split[0] == 'th':
kwargs.pop('truncation_tolerance', None)
f = getattr(self, 'basis_' + name)
return f(*args, **kwargs)
def sample(self, ischeme, degree):
'Create sample.'
points = [ischeme(reference, degree) for reference in self.references] if callable(ischeme) \
else self.references.getpoints(ischeme, degree)
offset = numpy.cumsum([0] + [p.npoints for p in points])
return sample.Sample((self.transforms, self.opposites), points, map(numpy.arange, offset[:-1], offset[1:]))
@util.single_or_multiple
def integrate_elementwise(self, funcs, *, asfunction=False, **kwargs):
'element-wis |
xavfernandez/pip | tests/data/src/pep518_twin_forkbombs_first-234/setup.py | Python | mit | 143 | 0 | from se | tuptools import setup
setup(name='pep518_twin_forkbombs_first',
version='234',
py_modules=['pep518_twin_forkbombs_ | first'])
|
edx-solutions/edx-platform | common/test/acceptance/fixtures/xqueue.py | Python | agpl-3.0 | 1,436 | 0.000696 | """
Fixture to configure XQueue response.
"""
import json
import requests
from common.test.acceptance.fixtures import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submissio | n that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
| url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
raise XQueueResponseFixtureError(
u"Could not configure XQueue stub for queue '{1}'. Status code: {2}".format(
self._pattern, self._response_dict))
|
svilaa/item_database | item_database/iitem_database/admin.py | Python | gpl-3.0 | 366 | 0.005464 | from django.contrib import admin
from iitem_database.models import Item, | ItemClass, Area, Creature, Drops, Found, UserItems, ItemReview
admin.site.register(Item)
admin.site.register(ItemClass)
admin.site.register(Area)
admin.site.register(Creature)
admin.site.register(Drops)
admin.site.register(Found)
admin.site.register(UserItems)
admin.site.regist | er(ItemReview) |
facebookresearch/ParlAI | parlai/zoo/light_whoami/rpa_reranker.py | Python | mit | 394 | 0 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is | licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RPA Re-Ranker Model.
"""
from parlai.zoo.light_whoami.whoami_download import download_with_model_type
def download(datapath):
download_with_model_type(datapath, | 'rpa_reranker', 'v1.0')
|
SyrakuShaikh/python | learning/a_byte_of_python/ds_reference.py | Python | gpl-3.0 | 712 | 0 | print('Simple Assignment')
shoplist = ['apple', 'mango', 'carrot', 'banana']
# mylist is just another name pointing to the same object!
mylist = shoplist
# I purchased the first item, so I remove it from the list
del shoplist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
# Notice that both shoplist and mylist both print
# the same list without the 'apple' confirming that
# they point to the same object
print('Copy by making a full slice')
# Make a copy by doing a full slice
mylist = shoplist[:]
mylist1 = shoplist.copy()
# Remove first item
del mylist[0]
print('shoplist is', shoplist)
print('mylist | is', mylist)
del mylist1[1] |
print('shoplist is', shoplist)
print('mylist is', mylist1)
|
dmcc/bllip-parser | python/tests/test_reranking_parser.py | Python | apache-2.0 | 68,004 | 0.003 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import unittest
from bllipparser import Sentence, tokenize, RerankingParser, Tree
from bllipparser.RerankingParser import (NBestList, ScoredParse,
get_unified_model_parameters)
# throughout: reprs are called to ensure they don't crash, but we don't
# rely on their value
class MiscToolTests(unittest.TestCase):
def test_sentence(self):
s = Sentence('Hi there.')
self.assertEqual(s.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s), 3)
repr(s)
s2 = Sentence(s)
self.assertEqual(s2.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s2), 3)
s3 = Sentence(s.sentrep)
self.assertEqual(s3.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s3), 3)
def test_sentences_from_string(self):
sentences = Sentence.sentences_from_string('<s> Test </s>')
self.assertEqual(len(sentences), 1)
self.assertEqual(sentences[0].tokens(), ['Test'])
self.assertEqual(sentences[0].tokens()[0], 'Test')
sentences2 = Sentence.sentences_from_string('''<s> Sentence 1 </s>
<s> Can't have just one. </s>
<s last> The last sentence </s>
<s> Just kidding. </s>''')
self.assertEqual(len(sentences2), 4)
self.assertEqual(sentences2[0].tokens(), ['Sentence', '1'])
self.assertEqual(sentences2[1].tokens(), ['Can', "n't", 'have',
'just', 'one', '.'])
self.assertEqual(sentences2[2].tokens(), ['The', 'last', 'sentence'])
self.assertEqual(sentences2[3].tokens(), ['Just', 'kidding', '.'])
def test_sentences_from_file(self):
sentences = Sentence.sentences_from_file('sample-text/fails.sgml')
self.assertEqual(len(sentences), 4)
self.assertEqual(sentences[0].tokens(), 'A -RSB- -LSB- B -RSB- -LSB- C -RSB- -LSB- D -RSB- -LSB- A -RSB- -LSB- B -RSB- -LSB- C -RSB- -LSB- D -RSB- -LSB- E -RSB- -LSB- G -RSB- -LSB- F -RSB- -LSB- G -RSB- -LSB- H -RSB- -LSB- I -RSB- -LSB- J -RSB- -LSB- K -RSB- -LSB- L -RSB- -LSB- M -RSB- -LSB- N -RSB- -LSB- N -RSB- .'.split())
self.assertEqual(sentences[1].tokens(), '# ! ? : -'.split())
self.assertEqual(sentences[2].tokens(),
'744 644 413 313 213 231 131 544 444 344 543 443 613 513 921 821 721 621 521 001'.split())
self.assertEqual(sentences[3].tokens(), list(map(str, range(1, 501))))
def test_tokenizer(self):
tokens1 = tokenize("Tokenize this sentence, please.")
self.assertEqual(tokens1, ['Tokenize', 'this', 'sentence', ',',
'please', '.'])
tokens2 = tokenize("Whoa! What's going on here? @($*")
self.assertEqual(tokens2, ['Whoa', '!', 'What', "'s", 'going',
'on', 'here', '?', '@', '-LRB-', '$', '*'])
# arguably, this is a bug as 3 should have been separated from -LSB-
tokens3 = tokenize("You can't do that (or can you?). [3]")
self.assertEqual(tokens3, ['You', 'can', "n't", 'do', 'that',
'-LRB-', 'or', 'can', 'you', '?',
'-RRB-', '.', '-LSB-3', '-RSB-'])
def test_unified_model_params(self):
self.assertRaises(IOError, get_unified_model_parameters,
'/path/to/nowhere/hopefully')
self.assertRaises(IOError, RerankingParser.from_unified_model_dir,
'/path/to/nowhere/hopefully')
# rest is hard to test given that we can only load one model...
class RerankingParserTests(unittest.TestCase):
def test_1_loading_errors(self):
# parser loading errors
rrp = RerankingParser()
repr(rrp)
self.assertRaises(ValueError, rrp.load_parser_model,
'/path/to/nowhere/hopefully')
self.a | ssertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.load_parser_model, u'\u2602')
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.load_reranker_model, u'\u2602',
'second-stage/models/ec50spfinal/cvlm-l1c10P1-'
'weights.gz')
self.assertRaises(ValueError, rrp.load_reranker_model,
'second-stag | e/models/ec50spfinal/features.gz',
u'\u2602')
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
# tree function loading errors
tree = Tree('(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
self.assertRaises(ValueError, tree.evaluate, tree)
self.assertRaises(ValueError, tree.log_prob)
self.assertRaises(ValueError, tree.head)
s = Sentence('(Sentence for when the parser is not loaded)')
self.assertRaises(ValueError, s.independent_tags)
def test_2_basics(self):
rrp = RerankingParser()
# make sure we're starting fresh
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error,
'auto')
rrp.load_parser_model('first-stage/DATA/EN')
repr(rrp)
self.assertEqual(rrp.check_models_loaded_or_error(False), False)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
rrp.load_reranker_model('second-stage/models/ec50spfinal/features.gz',
'second-stage/models/ec50spfinal/cvlm-'
'l1c10P1-weights.gz')
repr(rrp)
self.assertEqual(rrp.check_models_loaded_or_error(False), False)
self.assertEqual(rrp.check_models_loaded_or_error(True), True)
self.assertEqual(rrp.check_models_loaded_or_error('auto'), True)
self.assertEqual(rrp.parser_model_dir, 'first-stage/DATA/EN')
self.assertEqual(rrp.simple_parse('This is simple.'),
'(S1 (S (NP (DT This)) (VP (AUX is) (ADJP '
'(JJ simple))) (. .)))')
nbest_list = rrp.parse('This is a sentence.')
self.failUnless(isinstance(nbest_list, NBestList))
self.assertNBestListStringsAlmostEqual(str(nbest_list).strip(), '''
13 x
-8.88655845608 -30.3981669701
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-13.936145728 -46.4346864304
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-14.3607122818 -47.4390055933
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-14.7026007585 -41.4723634172
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-15.3583543915 -48.567244735
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-19.285724575 -56.2161267587
(S1 (SBARQ (WHNP (DT This)) (SQ (AUX is) (NP (DT a) (NN sentence))) (. .)))
-19.7521880305 -57.5088828776
(S1 (S (NP (NNP This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-20.1767545843 -58.5132020405
(S1 (S (NP (NN This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-20.2330660538 -55.5759876981
(S1 (SBARQ (WHNP (DT This)) (SQ (VP (AUX is) (NP (DT a) (NN sentence)))) (. .)))
-20.3467824313 -59.0747445934
(S1 (S (ADVP (DT This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-21.174396694 -59.6414411821
(S1 |
canvasnetworks/canvas | website/canvas/migrations/0163_auto__add_field_userinfo_avatar.py | Python | bsd-3-clause | 21,325 | 0.007362 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserInfo.avatar'
db.add_column('canvas_userinfo', 'avatar', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas.Content'], null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserInfo.avatar'
db.delete_column('canvas_userinfo', 'avatar_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp' | : ('canvas.util.UnixTimestampField', [], {})
},
'can | vas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
|
amiraliakbari/sharif-mabani-python | by-session/ta-921/j3/number1.py | Python | mit | 267 | 0.007491 | def devisors(n):
| r = 0
i = 1
while i <= n:
if n % i == 0:
r += 1
i += 1
return r
def is_prime(n):
x = devisors(n)
if x == 2:
return True
else:
return False
print i | s_prime(3)
|
ymoch/pyjpmesh | test/test_angle.py | Python | mit | 2,197 | 0 | """
Tests for jpmesh.angle.
"""
import unittest
from jpmesh import Angle
class TestAngle(unittest.TestCase):
"""Tests for jpmesh.Angle.
"""
def test_properties(self):
"""Test for properties.
"""
millisecond = 3600000
angle = Angle.from_millisecond(millisecond)
self.assertEqual(angle.degree, float(millisecond) / 60 / 60 / 1000)
self.assertEqual(angle.minute, float(millisecond) / 60 / 1000)
self.assertEqual(angle.second, float(millisecond) / 1000)
self.assertEqual(angle.millisecond, float(millisecond))
def test_operators | (self):
"""Test for operators.
"""
angle1 = Angle.from_millisecond(1.0)
angle2 = Angle.from_millisecond(1.0)
angle3 = Angle.from_millisecond(2.0)
self.assertEqual(
(angle1 + angle2).degree, angle1.degree + angle2.degree)
self.assertEqual(
(angle1 - angle2).degree, angle1.degree - angle2.degree)
self.assertEqual((angle1 * 2).degree, angle1.degree * 2)
self.assertEqual((angle1 / 2).degree, angle1.degree / | 2)
self.assertEqual((angle1 / 2.0).degree, angle1.degree / 2.0)
self.assertEqual((+angle1).degree, +angle1.degree)
self.assertEqual((-angle1).degree, -angle1.degree)
self.assertEqual(abs(angle1).degree, abs(angle1.degree))
self.assertTrue(angle1 == angle2)
self.assertFalse(angle1 == angle3)
self.assertFalse(angle1 != angle2)
self.assertTrue(angle1 != angle3)
self.assertFalse(angle1 < angle2)
self.assertTrue(angle2 < angle3)
self.assertTrue(angle1 <= angle2)
self.assertTrue(angle2 <= angle3)
self.assertFalse(angle3 <= angle2)
self.assertFalse(angle2 > angle1)
self.assertTrue(angle3 > angle2)
self.assertTrue(angle2 >= angle1)
self.assertTrue(angle3 >= angle2)
self.assertFalse(angle2 >= angle3)
# Call __div__ and __truediv__ expressly.
self.assertEqual(
angle1.for_test_div(2.0).degree, angle1.degree / 2.0)
self.assertEqual(
angle1.for_test_truediv(2.0).degree, angle1.degree / 2.0)
|
mhorn71/StarbaseMini | ui/mainwindow.py | Python | gpl-2.0 | 30,116 | 0.002922 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(905, 651)
MainWindow.setMinimumSize(QtCore.QSize(0, 651))
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setContentsMargins(9, 9, 9, 9)
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setMinimumSize(QtCore.QSize(621, 61))
self.groupBox_3.setMaximumSize(QtCore.QSize(16777215, 111))
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_3.setContentsMargins(9, 9, 9, 9)
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.statusMessage = QtWidgets.QTableWidget(self.groupBox_3)
self.statusMessage.setStyleSheet("QTableWidget {\n"
" font-size: 11px;\n"
" background-color: \'#FFFFE0\';\n"
"}\n"
"\n"
"QTableWidget QHeaderView {\n"
" font-size: 11px;\n"
"}\n"
"\n"
"QTableWidget QHeaderView::section {\n"
" height: 18px;\n"
"}")
self.statusMessage.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.statusMessage.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.statusMessage.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.statusMessage.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.statusMessage.setWordWrap(False)
self.statusMessage.setColumnCount(5)
self.statusMessage.setObjectName("statusMessage")
self.statusMessage.setRowCount(0)
self.statusMessage.horizontalHeader().setCascadingSectionResizes(True)
self.statusMessage.horizontalHeader().setDefaultSectionSize(100)
self.statusMessage.horizontalHeader().setMinimumSectionSize(56)
self.statusMessage.horizontalHeader().setStretchLastSection(False)
self.statusMessage.verticalHeader().setVisible(False)
self.statusMessage.verticalHeader().setStretchLastSection(False)
self.gridLayout_3.addWidget(self.statusMessage, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBox_3, 0, 0, 1, 1)
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setMinimumSize(QtCore.QSize(147, 0))
self.groupBox_2.setMaximumSize(QtCore.QSize(200, 16777215))
self.groupBox_2.setObjectName("groupBox_2")
self.layoutWidget = QtWidgets.QWidget(self.groupBox_2)
self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 131, 581))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.toolBox = QtWidgets.QToolBox(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toolBox.sizePolicy().hasHeightForWidth())
self.toolBox.setSizePolicy(sizePolicy)
self.toolBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.toolBox.setStyleSheet("QToolBox::tab {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,\n"
" stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n"
" border-radius: 5px;\n"
" color: darkgray;\n"
"}\n"
"\n"
"QToolBox::tab: | selected { \n"
" font: italic;\n"
" color: green;\n"
"}\n"
"\n"
"QToolBox::ta | b:!selected {\n"
" color: black;\n"
"}")
self.toolBox.setObjectName("toolBox")
self.UserCtrl = QtWidgets.QWidget()
self.UserCtrl.setGeometry(QtCore.QRect(0, 0, 129, 521))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.UserCtrl.sizePolicy().hasHeightForWidth())
self.UserCtrl.setSizePolicy(sizePolicy)
self.UserCtrl.setObjectName("UserCtrl")
self.layoutWidget1 = QtWidgets.QWidget(self.UserCtrl)
self.layoutWidget1.setGeometry(QtCore.QRect(0, 0, 131, 259))
self.layoutWidget1.setObjectName("layoutWidget1")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget1)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.layoutWidget1)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.moduleCombobox = QtWidgets.QComboBox(self.layoutWidget1)
self.moduleCombobox.setObjectName("moduleCombobox")
self.gridLayout.addWidget(self.moduleCombobox, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.layoutWidget1)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.commandCombobox = QtWidgets.QComboBox(self.layoutWidget1)
self.commandCombobox.setObjectName("commandCombobox")
self.gridLayout.addWidget(self.commandCombobox, 3, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.layoutWidget1)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.commandParameter = QtWidgets.QLineEdit(self.layoutWidget1)
self.commandParameter.setMaxLength(15)
self.commandParameter.setObjectName("commandParameter")
self.gridLayout.addWidget(self.commandParameter, 5, 0, 1, 1)
self.choicesComboBox = QtWidgets.QComboBox(self.layoutWidget1)
self.choicesComboBox.setObjectName("choicesComboBox")
self.gridLayout.addWidget(self.choicesComboBox, 6, 0, 1, 1)
self.executeButton = QtWidgets.QPushButton(self.layoutWidget1)
self.executeButton.setStyleSheet("")
self.executeButton.setObjectName("executeButton")
self.gridLayout.addWidget(self.executeButton, 7, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 8, 0, 1, 1)
self.toolBox.addItem(self.UserCtrl, "")
self.UserInst = QtWidgets.QWidget()
self.UserInst.setGeometry(QtCore.QRect(0, 0, 129, 521))
self.UserInst.setObjectName("UserInst")
self.layoutWidget2 = QtWidgets.QWidget(self.UserInst)
self.layoutWidget2.setGeometry(QtCore.QRect(0, 0, 128, 331))
self.layoutWidget2.setObjectName("layoutWidget2")
self.gridLayout_5 = QtWidgets.QGridLayout(self.layoutWidget2)
self.gridLayout_5.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setHorizontalSpacing(6)
self.gridLayout_5.setObjectName("gridLayout_5")
self.channel2Button = QtWidgets.QPushButton(self.layoutWidget2)
sizePolicy = QtWidgets.QSizePolicy |
lektor/lektor-archive | lektor/devcli.py | Python | bsd-3-clause | 3,718 | 0 | import os
import sys
import click
from .packages import get_package_info, register_package, publish_package
from .cli import pass_context
def ensure_plugin():
here = os.getcwd()
if not os.path.isfile(os.path.join(here, 'setup.py')):
raise click.UsageError('This command must be run in a '
'Lektor plugin folder')
info = get_package_info(here)
if not info['name'].lower().startswith('lektor-'):
raise click.UsageError('Python package is misnamed. Needs to start '
'with lektor-')
return info
@click.group(short_help='Development commands.')
def cli():
"""Development commands for Lektor.
This provides various development support commands for Lektor. This is
primarily useful for Lektor plugin development but also if you want to
extend Lektor itself. Additional functionality can | be unlocked by
exporting the `LEKTOR_DEV=1` environment variable.
"""
@cli.command('shell', short_help='Starts a python shell.')
@pass_context
def shell_cmd(ctx):
"""Starts a Python shell in the context of a Lektor project.
This is particularly useful for debugging plugins and to explore the
API. To quit the shell just use `quit()`. Within the shell various
utilities are available right from the get-go for you.
\b
- `project` | : the loaded project as object.
- `env`: an environment for the loaded project.
- `pad`: a database pad initialized for the project and environment
that is ready to use.
"""
ctx.load_plugins()
import code
from lektor.db import F, Tree
from lektor.builder import Builder
banner = 'Python %s on %s\nLektor Project: %s' % (
sys.version,
sys.platform,
ctx.get_env().root_path,
)
ns = {}
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ns)
pad = ctx.get_env().new_pad()
ns.update(
project=ctx.get_project(),
env=ctx.get_env(),
pad=pad,
tree=Tree(pad),
config=ctx.get_env().load_config(),
make_builder=lambda: Builder(ctx.get_env().new_pad(),
ctx.get_default_output_path()),
F=F
)
code.interact(banner=banner, local=ns)
@cli.command('publish-plugin', short_help='Publish a plugin to PyPI.')
def publish_plugin_cmd():
"""Publishes the current version of the plugin in the current folder.
This generally requires that your setup.py has at least the bare minimum
configuration for valid publishing to PyPI.
"""
info = ensure_plugin()
for key in 'author', 'author_email', 'license', 'url':
if not info[key]:
raise click.UsageError('Cannot publish plugin without setting '
'"%s" in setup.py.' % key)
register_package(info['path'])
publish_package(info['path'])
@cli.command('new-plugin', short_help='Creates a new plugin')
@click.option('--path', type=click.Path(), help='The destination path')
@click.argument('plugin_name', required=False)
@pass_context
def new_plugin(ctx, **defaults):
"""This command creates a new plugin.
This will present you with a very short wizard that guides you through
creation of a new plugin. At the end of it, it will create a plugin
in the packages folder of the current project or the path you defined.
This is the fastest way to creating a new plugin.
"""
from .quickstart import plugin_quickstart
project = ctx.get_project(silent=True)
plugin_quickstart(defaults, project=project)
|
cloudysunny14/faucet | client/rest.py | Python | apache-2.0 | 8,791 | 0.002616 | #!/usr/bin/env python
#
# Copyright 2014 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pkg_resources
import socket
import ssl
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('client requires urllib3.')
SDK_VERSION = "0.1.0"
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.version = resp.version
self.reason = resp.reason
self.strict = resp.strict
self.is_closed = False
def __del__(self):
self.close()
def __exit__(self, typ, value, traceback):
self.close()
def read(self, amt=None):
if self.is_closed:
raise ValueError('Response already closed')
return self.urllib3_response.read(amt)
BLOCKSIZE = 4 * 1024 * 1024 # 4MB at a time just because
def close(self):
"""Closes the underlying socket."""
if self.is_closed:
return
while self.read(RESTResponse.BLOCKSIZE):
pass
self.is_closed = True
self.urllib3_response.release_conn()
@property
def closed(self):
return self.is_closed
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
try:
urllib3.HTTPResponse.flush
urllib3.HTTPResponse.fileno
def fileno(self):
return self.urllib3_response.fileno()
def flush(self):
return self.urllib3_response.flush()
except AttributeError:
pass
def create_connection(address):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def json_loadb(data):
if sys.version_info >= (3,):
data = data.decode('utf8')
return json.loads(data)
class RESTClientObject(object):
def __init__(self, max_reusable_connections=8, mock_urlopen=None):
self.mock_urlopen = mock_urlopen
self.pool_manager = urllib3.PoolManager(
num_pools=4,
maxsize=max_reusable_connections,
block=False,
timeout=60.0,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=TRUSTED_CERT_FILE,
ssl_version=ssl.PROTOCOL_TLSv1,
)
def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False):
"""Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
post_params = post_params or {}
headers = headers or {}
headers['User-Agent'] = 'faucet/' + SDK_VERSION
if post_params:
if body:
raise ValueError("body parameter cannot be used with post_params parameter")
body = urllib.urlencode(post_params)
| headers["Content-type"] = "application/x-www-form-urlencoded"
if hasattr(body, 'getvalue'):
body = str(body.getvalue())
headers["Content-Length"] = len(body)
for key, value in headers.items():
if isinstance(value, basestring) and '\n' in value:
raise ValueError("headers should not contain newlines (%s: %s)" %
| (key, value))
try:
urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
r = urlopen(
method=method,
url=url,
body=body,
headers=headers,
preload_content=False
)
r = RESTResponse(r)
except socket.error as e:
raise RESTSocketError(url, e)
except urllib3.exceptions.SSLError as e:
raise RESTSocketError(url, "SSL certificate error: %s" % e)
if r.status != 200:
raise ErrorResponse(r, r.read())
return self.process_response(r, raw_response)
def process_response(self, r, raw_response):
if raw_response:
return r
else:
s = r.read()
try:
resp = json_loadb(s)
except ValueError:
raise ErrorResponse(r, s)
r.close()
return resp
def GET(self, url, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("GET", url, headers=headers, raw_response=raw_response)
def POST(self, url, params=None, headers=None, raw_response=False):
assert type(raw_response) == bool
if params is None:
params = {}
return self.request("POST", url,
post_params=params, headers=headers, raw_response=raw_response)
def POST_BODY(self, url, params=None, headers=None, body=None, raw_response=False):
assert type(raw_response) == bool
if params is None:
params = {}
return self.request("POST", url, body=body,
post_params=params, headers=headers,
raw_response=raw_response)
def PUT(self, url, body, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
IMPL = RESTClientObject()
@classmethod
def request(cls, *n, **kw):
return cls.IMPL.request(*n, **kw)
@classmethod
def GET(cls, *n, **kw):
return cls.IMPL.GET(*n, **kw)
@classmethod
def POST(cls, *n, **kw):
return cls.IMPL.POST(*n, **kw)
@classmethod
def PUT(cls, *n, **kw):
return cls.IMPL.PUT(*n, **kw)
@classmethod
def POST_BODY(cls, *n, **kw):
return cls.IMPL.POST_BODY(*n, **kw)
class RESTSocketError(socket.error):
def __init__(self, host, e):
msg = "Error connecting to \"%s\": %s" % (host, str(e))
socket.error.__init__(self, msg)
class _ErrorResponse__doc__(Exception):
_status__doc__ = "HTTP response status (an int)."
_reason__doc__ = "HTTP response reason (a string)."
_body__doc__ = "HTTP response body (string or JSON dict)."
_headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
_error_msg__doc__ = "Error message for developer (optional)."
_user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
def __init__(self, http_resp, body):
self.status = http_resp.status
self.reason = http_resp.reason
self.body = body
self.headers = http_resp.getheaders()
http_resp.close() # won't need this connection anymore
try:
self.body = json_loadb(self.body)
self.error_msg = self.body.get('error')
self.user_error_msg = self.body.get('user_error')
except ValueError:
self.e |
BYU-ODH/byu-hummedia-api | hummedia/test/test_vtt.py | Python | mpl-2.0 | 9,589 | 0.023777 | import pytest
import io
import json
import sys
from .. import config
from .. import vtt
reload(sys)
sys.setdefaultencoding('utf-8')
def test_from_srt(ASSETS):
f = io.BytesIO()
vtt.from_srt(ASSETS + 'subs.srt', f)
compare = open(ASSETS + 'subs.vtt', 'r')
assert f.getvalue() == compare.read()
def test_from_srt_bom(ASSETS):
f = io.BytesIO()
vtt.from_srt(ASSETS + 'subs-bom.srt', f)
compare = open(ASSETS + 'subs.vtt', 'r')
assert f.getvalue() == compare.read()
def test_from_srt_file(ASSETS):
i = open(ASSETS + 'subs.srt')
o = io.BytesIO()
vtt.from_srt(i, o)
compare = open(ASSETS + 'subs.vtt', 'r')
v = o.getvalue().decode('utf8')
w = compare.read().decode('utf8')
assert v == w
def test_from_srt_file_tricky_decoding(ASSETS):
i = open(ASSETS + 'tricky-decoding.srt')
o = io.BytesIO()
try:
vtt.from_srt(i, o) # TODO: compare output with known data
except UnicodeDecodeError:
assert False, "Could not accurately decode srt file."
def test_from_bad_srt(ASSETS):
i = open(ASSETS + 'fake.srt')
o = io.BytesIO()
with pytest.raises(Exception):
vtt.from_srt(i, o)
def test_iso_8859_srt(ASSETS):
i = open(ASSETS + 'ISO-8859.srt')
o = io.BytesIO()
vtt.from_srt(i, o)
compare = open(ASSETS + 'utf8.vtt', 'r')
assert o.getvalue() == compare.read()
def test_special_chars(ASSETS):
i = open(ASSETS + 'special-chars.srt')
o = io.BytesIO()
vtt.from_srt(i, o)
compare = open(ASSETS + 'special-chars.vtt', 'r')
assert o.getvalue() == compare.read()
def test_shift_time(ASSETS):
f = io.BytesIO()
vtt.shift_time(ASSETS + 'subs.vtt', f, 10)
compare = open(ASSETS + 'subs+10.vtt', 'r')
assert f.getvalue() == compare.read()
def test_shift_time_file(ASSETS):
i = open(ASSETS + 'subs.vtt', 'r')
o = io.BytesIO()
vtt.shift_time(i, o, 10)
i.close()
compare = open(ASSETS + 'subs+10.vtt', 'r')
assert o.getvalue() == compare.read()
def test_validate_vtt(ASSETS):
assert vtt.is_valid(ASSETS + 'subs.vtt') is True
def test_invalid_vtt(ASSETS):
assert vtt.is_valid(ASSETS + 'invalid.vtt') is False
def test_upload_srt(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs.srt') as f:
data = {"subtitle": (f, 'subs.srt')}
response = app.post('/video', data=data)
assert response.status_code == 200
data = json.loads(response.data)
file = data['ma:hasRelatedResource'][0]['@id']
assert file.split('.')[-1] == 'vtt'
filename = file.split('/')[-1]
file = open(config.SUBTITLE_DIRECTORY + filename, 'r')
orig = open(ASSETS + 'subs.vtt', 'r')
assert orig.read() == file.read()
def test_upload_vtt(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'subs.vtt'),
'name': "The One True Subtitle",
'lang': 'en'
}
response = app.post('/video', data=data)
assert response.status_code == 200
data = json.loads(response.data)
file = data['ma:hasRelatedResource'][0]['@id']
assert data['ma:hasRelatedResource'][0]['name'] == 'The One True Subtitle'
assert data['ma:hasRelatedResource'][0]['language'] == 'en'
assert file.split('.')[-1] == 'vtt'
filename = file.split('/')[-1]
file = open(config.SUBTITLE_DIRECTORY + filename, 'r')
orig = open(ASSETS + 'subs.vtt', 'r')
assert orig.read() == fi | le.read()
def test_upload_vtt_as_student_with_write_access(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
v = app.post('/video')
data = json.loads(v.data)
vid_pid = data[ | 'pid']
c = app.post('/collection', data=json.dumps({}), headers={'Content-Type': 'application/json'})
data = json.loads(c.data)
col_pid = data['pid']
# attach video to collection
membership = [{"collection":{"id":col_pid,"title":"Something"},"videos":[vid_pid]}]
membership_result = app.post('/batch/video/membership', data=json.dumps(membership), headers={'Content-Type': "application/json"})
assert membership_result.status_code is 200
# now grant write access to the TA
patch = {"dc:rights": {"read": [ACCOUNTS['STUDENT']['username']], "write": [ACCOUNTS['STUDENT']['username']]}}
result = app.patch('/collection/' + col_pid, data=json.dumps(patch), headers={'Content-Type': 'application/json'})
assert result.status_code is 200
app.login(ACCOUNTS['STUDENT'])
response = None
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'subs.vtt'),
'name': "The One True Subtitle",
'lang': 'en'
}
response = app.patch('/video/' + vid_pid, data=data)
assert response.status_code == 200
data = json.loads(response.data)
file = data['ma:hasRelatedResource'][0]['@id']
assert data['ma:hasRelatedResource'][0]['name'] == 'The One True Subtitle'
assert data['ma:hasRelatedResource'][0]['language'] == 'en'
assert file.split('.')[-1] == 'vtt'
filename = file.split('/')[-1]
file = open(config.SUBTITLE_DIRECTORY + filename, 'r')
orig = open(ASSETS + 'subs.vtt', 'r')
assert orig.read() == file.read()
def test_upload_invalid_vtt(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS['SUPERUSER'])
response = None
with open(ASSETS + 'invalid.vtt') as f:
data = {
'subtitle': (f, 'subs.vtt'),
'name': "The One True Subtitle",
'lang': 'en'
}
response = app.post('/video', data=data)
assert response.status_code == 400
def test_upload_multi_period_vtt(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'has.lots.of.periods.vtt')
}
response = app.post('/video', data=data)
assert response.status_code == 200
def test_upload_bad_extension_subtitles(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
response = None
with pytest.raises(Exception) as e:
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'bad.extension')
}
app.post('/video', data=data)
assert 'Extension' in str(e)
def test_upload_duplicate_named_subtitles(ASSETS, ACCOUNTS, app):
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs+10.vtt') as f:
data = {"subtitle": (f, 'subs.vtt')}
response = app.post('/video', data=data)
with open(ASSETS + 'subs.vtt') as f:
rjson = json.loads(response.data)
data = {"subtitle": (f, 'subs.vtt')}
response = app.patch('/video/' + rjson['pid'], data=data)
assert response.status_code == 200
data = json.loads(response.data)
assert len(data['ma:hasRelatedResource']) == 2
file1 = data['ma:hasRelatedResource'][0]['@id']
file2 = data['ma:hasRelatedResource'][1]['@id']
assert file1 != file2
def test_delete_subtitles(ASSETS, ACCOUNTS, app):
import os
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'subs.vtt')
}
response = app.post('/video', data=data)
data = json.loads(response.data)
pid = data['pid']
file = data['ma:hasRelatedResource'][0]['@id']
filename = file.split('/')[-1]
response = app.delete('/text/' + filename)
assert response.status_code == 200, "Could not delete file"
response = app.delete('/text/' + filename)
assert response.status_code == 404, "Deleted file not returning 404"
response = app.get('/video/' + pid)
data = json.loads(response.data)
assert len(data['ma:hasRelatedResource']) == 0, "RelatedResource not deletd"
filepath = config.SUBTITLE_DIRECTORY + filename
assert os.path.isfile(filepath) == False, "File still exists on server."
def test_delete_subtitles(ASSETS, ACCOUNTS, app):
import os
app.login(ACCOUNTS["SUPERUSER"])
response = None
with open(ASSETS + 'subs.vtt') as f:
data = {
'subtitle': (f, 'subs.vtt')
}
response = app.post('/video', data=data)
data = json.loads(response.data)
pid = data['pid']
file = data['ma:hasRelatedResource'][0]['@id']
filename = file.split('/')[-1]
response = app.delete('/text/' + filename)
assert response.status_code == 200, "Invalid status code " + str( |
dermoth/gramps | gramps/plugins/drawreport/drawplugins.gpr.py | Python | gpl-2.0 | 7,435 | 0.006187 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.plug._pluginreg import *
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
MODULE_VERSION="5.1"
# this is the default in gen/plug/_pluginreg.py: plg.require_active = True
#------------------------------------------------------------------------
#
# Ancestor Tree
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'ancestor_chart,BKI'
plg.name = _("Ancestor Chart")
plg.description = _("Produces a graphical ancestral chart")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'ancestortree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'AncestorTree'
plg.optionclass = 'AncestorTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'ancestor_chart'
plg.name = _("Ancestor Tree")
plg.description = _("Produces a graphical ancestral tree")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'ancestortree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'AncestorTree'
plg.optionclass = 'AncestorTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Calendar
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'calendar'
plg.name = _("Calendar")
plg.description = _("Produces a graphical calendar")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'calendarreport.py'
plg.ptype = REPORT
plg.authors = ["Douglas S. Blank"]
plg.authors_email = ["dblank@cs.brynmawr.edu"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'Calendar'
plg.optionclass = 'CalendarOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Descendant Tree
#
#--------------------------------------- | ---------------------------------
plg = newplugin()
plg.id = 'descend_c | hart,BKI'
plg.name = _("Descendant Chart")
plg.description = _("Produces a graphical descendant chart")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'descend_chart'
plg.name = _("Descendant Tree")
plg.description = _("Produces a graphical descendant tree")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Family Descendant Tree
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'family_descend_chart,BKI'
plg.name = _("Family Descendant Chart")
plg.description = _("Produces a graphical descendant chart around a family")
plg.version = '1.0'
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.category = CATEGORY_DRAW
plg.gramps_target_version = MODULE_VERSION
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.require_active = True
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_BKI]
plg = newplugin()
plg.id = 'family_descend_chart'
plg.name = _("Family Descendant Tree")
plg.description = _("Produces a graphical descendant tree around a family")
plg.version = '1.0'
plg.status = STABLE
plg.fname = 'descendtree.py'
plg.ptype = REPORT
plg.category = CATEGORY_DRAW
plg.gramps_target_version = MODULE_VERSION
plg.authors = ["Craig J. Anderson"]
plg.authors_email = ["ander882@hotmail.com"]
plg.require_active = True
plg.reportclass = 'DescendTree'
plg.optionclass = 'DescendTreeOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Fan Chart
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'fan_chart'
plg.name = _("Fan Chart")
plg.description = _("Produces fan charts")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'fanchart.py'
plg.ptype = REPORT
plg.authors = ["Donald N. Allingham"]
plg.authors_email = ["don@gramps-project.org"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'FanChart'
plg.optionclass = 'FanChartOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
#------------------------------------------------------------------------
#
# Statistics Charts
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'statistics_chart'
plg.name = _("Statistics Charts")
plg.description = _("Produces statistical bar and pie charts of the people "
"in the database")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'statisticschart.py'
plg.ptype = REPORT
plg.authors = ["Eero Tamminen"]
plg.authors_email = [""]
plg.category = CATEGORY_DRAW
plg.reportclass = 'StatisticsChart'
plg.optionclass = 'StatisticsChartOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
plg.require_active = False
#------------------------------------------------------------------------
#
# Timeline Chart
#
#------------------------------------------------------------------------
plg = newplugin()
plg.id = 'timeline'
plg.name = _("Timeline Chart")
plg.description = _("Produces a timeline chart.")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'timeline.py'
plg.ptype = REPORT
plg.authors = ["Donald N. Allingham"]
plg.authors_email = ["don@gramps-project.org"]
plg.category = CATEGORY_DRAW
plg.reportclass = 'TimeLine'
plg.optionclass = 'TimeLineOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_BKI, REPORT_MODE_CLI]
|
eduNEXT/edx-platform | lms/__init__.py | Python | agpl-3.0 | 959 | 0.003128 | """
Celery needs to be loaded when the cms modules are so that task
registration and discovery can work correctly.
"""
# We monkey patch Kombu's entrypoints listing because scanning through this
# accounts for the majority of LMS/Studio startup time for tests, and we don't
# use custom Kombu serializers (which is what this is for). Sti | ll, this is
# pretty evil, and should be taken out when we update Celery to the next version
# where it looks like this method of custom serialization has been removed.
#
# FWIW, this is identical behavior to what happens in Kombu if pkg_resources
# isn't av | ailable.
import kombu.utils
kombu.utils.entrypoints = lambda namespace: iter([])
# This will make sure the app is always imported when Django starts so
# that shared_task will use this app, and also ensures that the celery
# singleton is always configured for the LMS.
from .celery import APP as CELERY_APP # lint-amnesty, pylint: disable=wrong-import-position
|
edljk/Mosek.jl | deps/src/mosek/7/tools/examples/python/simple.py | Python | mit | 1,730 | 0.015029 | #
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: simple.py
#
# Purpose: Demonstrates a very simple example using MOSEK by
# reading a problem file, solving the problem and
# writing the solution to a file.
#
import mosek
import sys
def streamprinter(msg):
sys.stdout.write (msg)
sys.stdout.flush ()
if len(sys.argv) <= 1:
print ("Missing argument, syntax is:")
print (" simple inputfile [ solutionfile ]")
else:
# Create the mosek environment.
env = mosek.Env ()
# Create a task object linked with the environment env.
# We create it with 0 variables and 0 constraints initially,
# since we do not know the size of the problem.
task = env.Task (0, 0)
task.set_Stream (mosek.streamtype.log, streamprinter)
# We assume that a problem file was given as the first command
# line argument (received in `argv')
task.readdata (sys.argv[1])
# Solve the problem
task.optimize ()
# Print a summary of the solution
task.solutionsummary (mosek.streamtype.log)
# If an output file was specified, write a solution
if len(sys.argv) >= 3:
# We define the output format to be OPF, and tell MOSEK to
# leave out parameters an | d problem data from the output file.
task.putintparam (mosek.iparam.write_data_format, mosek.dataformat.op)
task.putintparam (mosek.iparam.opf_write_solutions, mosek.onoffkey.on)
task.putintparam (mosek.iparam.opf_write_hints, mosek.onoffkey.off)
task.putintparam (mosek.iparam.opf_write_parameters, mosek.onoffkey.off)
task.putintparam (mosek.iparam.opf_write_pro | blem, mosek.onoffkey.off)
task.writedata (sys.argv[2])
|
gizmag/django-arrow-field | arrow_field/model_fields.py | Python | mit | 1,161 | 0 | from __future__ import unicode_literals
import arrow
from django.db.models import DateTimeField, SubfieldBase
from .form_fields import ArrowField as ArrowFormField
class ArrowField(DateTimeField):
__metaclass__ = SubfieldBase
def to_python(self, value):
if isinstance(value, arrow.Arrow):
return value
value = super(ArrowField, self).to_python(value)
if value:
return arrow.get(value)
def get_prep_value(self, value):
if value:
return value.datetime
def value_to_string(self, obj):
value = se | lf._get_val_from_obj(obj)
return '' if value is None else value.isoformat()
def pre_save(self, model_instance, a | dd):
if self.auto_now or (self.auto_now_add and add):
value = arrow.utcnow()
setattr(model_instance, self.attname, value)
return value
else:
return super(ArrowField, self).pre_save(model_instance, add)
def formfield(self, **kwargs):
defaults = {'form_class': ArrowFormField}
defaults.update(kwargs)
return super(ArrowField, self).formfield(**defaults)
|
codeurimpulsif/PyMCS | console.py | Python | gpl-3.0 | 1,539 | 0.014945 | #!/usr/bin/python
# Console
import sys, os, time, subprocess
def MCS():
return subprocess.Popen(['python', 'mcs.py'])
def color(text, color):
if color == 0: color = "\033[0m"
if color == 1: color = "\033[94m"
if color == 2: color = "\033[92m"
if color == 3: color = "\033[91m"
if color == 4: color = "\033[93m"
return color+text+"\033[0m"
def showMenu():
print("\033[H\033[J", end="")
print("== Music Control System v0.1 ==")
print("= =")
print("= {} MCS Time {} =".format(color("[7]",1), color("[8]",3)))
print("= {} MCS Auto {} =".format(color("[4]",1), color("[5]",3)))
print("= {} MCS EDT {} =".format(color("[1]",1), color("[2]",3)))
print("= =")
print("========= Informations ========")
print("= =")
print("= =")
print("= =")
print("===============================")
def main():
# Old hack (TODO)
class main:
def poll(): return True
while 1:
showMenu()
command = str(input("=> "))
if command == '4':
if main.poll() == None: |
main.terminate()
main.kill()
main = MCS()
if c | ommand == '5':
if main.poll() == None:
main.terminate()
main.kill()
if command == 'q':
exit(0)
if __name__ == "__main__": main()
|
Debetux/sr.ht | setup.py | Python | mit | 300 | 0.066667 | from distutils.core import setup
setup(
name = 'srht',
packages = ['srht'],
version = '1.0.0',
description | = 'sr.ht services',
author = 'Drew DeVault',
author_email = 'sir@cmpwn.com',
url = 'https://github.com/SirCmpwn/sr.ht',
download_url = '',
keywords = [],
classifie | rs = []
)
|
sarielsaz/sarielsaz | share/rpcuser/rpcuser.py | Python | mit | 1,118 | 0.005367 | #!/usr/bin/env python2
# Copyright (c) 2015-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompa | nying
# file COPYING or http://www.opensource. | org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to sarielsaz.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
larsonmp/security-system | src/service/rest/client.py | Python | lgpl-3.0 | 1,511 | 0.02912 | from io import BytesIO
from json import dumps
from mimetypes import guess_all_extensions
from os.path import join
from requests import delete, get, post
from tempfile import gettempdir
def guess_extension(mimetype):
#there's an oddity on raspbian wherein guess_extension is non-deterministic
return sorted(guess_all_extensions(mimetype))[-1]
class Client(object):
def __init__(self, protocol='http', host='localhost', port=80):
super().__init__()
self._base_url = '{}://{}:{}'.format(protocol, host, port)
def capture_snapshot(self, cid=0, count=1):
url = '{base_url}/camera/{cid:d}/snapshot'.format(base_url=self._base_url, cid=cid)
body = {
'count': count
}
headers = {
'Content-Type': 'application/json'
}
return post(url, headers=headers, data=dumps(body)).json()
def delete_snapshot(self, sid, cid=0):
url = '{base_url}/camera/{cid:d}/snapshot/{sid}'.format(base_url=self._base_url, cid=cid, sid=sid)
return delete(url)
def get_snapshot(self, sid, cid=0):
url = '{base_url}/camera/{cid:d}/snapshot/{sid}'.format(base_url=self._base_url, cid=c | id, sid=sid)
response = get(url)
extension = guess_ex | tension(response.headers['content-type'])
filepath = join(gettempdir(), sid + extension)
with open(filepath, 'wb') as fp:
fp.write(BytesIO(response.content).read())
return filepath
def get_all_snapshot_ids(self, cid=0):
url = '{base_url}/camera/{cid:d}/snapshot'.format(base_url=self._base_url, cid=cid)
response = get(url)
return response.json()
|
gminds/rapidnewsng | django/contrib/comments/admin.py | Python | bsd-3-clause | 3,681 | 0.00326 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.comments.models import Comment
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib.comments import get_model
from django.contrib.comments.views.moderation import perform_flag, perform_approve, perform_delete
class UsernameSearch(object):
"""The User object may not be auth.User, so we need to provide
a mechanism for issuing the equivalent of a .filter(user__username=...)
search in CommentAdmin.
"""
def __str__(self):
return 'user__%s' % get_user_model().USERNAME_FIELD
class CommentsAdmin(admin.ModelAdmin):
fieldsets = (
(None,
{'fields': ('content_type', 'object_pk', 'site')}
),
(_('Content'),
{'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment')}
),
(_('Metadata'),
{'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
),
)
list_display = ('name', 'content_type', 'object_pk', 'ip_address', 'submit_date', 'is_public', 'is_removed')
list_filter = ('submit_date', 'site', 'is_public', 'is_removed')
# date_hierarchy = 'submit_date'
ordering = ('-submit_date',)
raw_id_fields = ('user',)
search_fields = ('comment', UsernameSearch(), 'user_name', 'user_email', 'user_url', 'ip_address')
actions = ["flag_comments", "approve_comments", "remove_comments"]
def get_actions(self, request):
actions = super(CommentsAdmin, self).get_actions(request)
# Only superusers should be able to delete the comments from the DB.
if not request.user.is_superuser and 'delete_selected' in actions:
actions.pop('delete_selected')
if not request.user.has_perm('comments.can_moderate'):
if 'approve_comments' in actions:
actions.pop('approve_comments')
if 'remove_comments' in actions:
actions.pop('remove_comments')
return actions
def flag_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_flag,
lambda n: ungettext('flagged', 'flagged', n))
flag_comments.short_description = _("Flag selected comments")
def approve_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_approve,
lambda n: ungettext('approved', 'approved', n))
approve_comments.short_description = _("Approve | selected comment | s")
def remove_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_delete,
lambda n: ungettext('removed', 'removed', n))
remove_comments.short_description = _("Remove selected comments")
def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext('1 comment was successfully %(action)s.',
'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
if get_model() is Comment:
admin.site.register(Comment, CommentsAdmin)
|
IvanMalison/okcupyd | okcupyd/db/model/__init__.py | Python | mit | 107 | 0 | from .message import Message
from .message_thread i | mport MessageThread
from . | user import User, OKCupydUser
|
Bartzi/stn-ocr | mxnet/operations/disable_shearing.py | Python | gpl-3.0 | 1,178 | 0 | import os
import mxnet as mx
# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
os.environ["MXNET_CPU_WORKER_NTHREADS"] = "2"
class DisableShearing(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
out = in_data[0].copy().asnumpy().reshape(-1, 2, 3)
out[:, 0, 1] = 0
out[:, 1, 0] = 0
self.assign(out_data[0], req[0], mx.nd.array(out.reshape(-1, 6)))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
grad = out_grad[0].asnumpy().reshape(-1, 2, 3)
grad[:, 0, 1] = 0
grad[: | , 1, 0] = 0
self.assign(in_grad[0], req[0], mx.nd.array(grad.reshape(-1, 6)))
@mx.operator.register("DisableShearing")
class DisableShearingProp(mx.operator.CustomOpProp):
def __init__(self):
super(DisableShearingProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def infer_shape(sel | f, in_shape):
return [in_shape[0]], [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return DisableShearing()
|
F5Networks/f5-common-python | devtools/bin/create-test-list.py | Python | apache-2.0 | 4,268 | 0.002812 | #!/usr/bin/env python
"""
Rules
for *.py files
* if the changed file is __init__.py, and there is a side-band test/ dir, then test the entire test/functional directory
the reason for this is that the init files are usually organizing collections
and those can affect many different apis if they break
* if the filename is test_*.py then include it
* if the filename is *.py, then check to see if it has an associated test_FILENAME file
and if so, include it in the test
* summarize all of the above so that a test_FILENAME that is a subpath of the first bullet
is not tested twice
for non-*.py files
* if the file is in a test/functional directory, test the whole directory
"""
import subprocess
import os
import shutil
import argparse
def cleanup_tox_directory():
if os.path.exists('.tox'):
shutil.rmtree('.tox')
def examine_python_rules(line):
fname, fext = os.path.splitext(line)
filename = os.path.basename(line)
dirname = os.path.dirname(line)
test_filename = 'test_' + filename
functional_test_file = '{0}/test/functional/{1}'.format(dirname, test_filename)
functional_test_dir = '{0}/test/functional/'.format(dirname)
if filename == '__init__.py' and os.path.exists(functional_test_dir):
return functional_test_dir
elif filename.startswith('test_') and filename.endswith('.py'):
return line
elif fext == '.py' and os.path.exists(functional_test_file):
return functional_test_file
elif 'test/functional' in line and filename == '__init__.py':
print(" * Skipping {0} because it is not a test file".format(line))
elif filename == '__init__.py' and not os.path.exists(functional_test_dir):
print(" * {0} does not have a side-band test directory!".format(line))
else:
print(" * {0} did not match any rules!".format(line))
def examine_non_python_rules(line):
if 'test/functional' in line:
return os.path.dirname(line)
def determine_files_to_test(product, commit):
results = []
build_all = [
'setup.py', 'f5/bigip/contexts.py', 'f5/bigip/mixins.py',
'f5/bigip/resource.py', 'f5sdk_plugins/fixtures.py',
'f5/bigip/__init__.py'
]
output_file = "pytest.{0}.jenkins.txt".format(product)
p1 = subprocess.Popen(
['git', '--no-pager', 'diff', '--name-only', 'origin/development', commit],
stdout=subprocess.PIPE,
)
p2 = subprocess.Popen(
['egrep', '-v', '(^requirements\.|^setup.py)'],
stdin=p1.stdout,
stdout=subprocess.PIPE,
)
p3 = subprocess.Popen(
['egrep', '(^f5\/{0}\/)'.format(product)],
stdin=p2.stdout,
stdout=subprocess.PIPE,
)
out, err = p3.communicate()
out = out.splitlines()
out = filter(None, out)
if not out:
return
for line in out:
fname, fext = os.path.splitext(line)
if not os.path.exists(line):
print "{0} was not found. Maybe this is a rename?".format(line)
continue
if line in build_all:
cleanup_tox_directory()
results.append('f5/{0}'.format(product))
elif fext == '.py':
result = examine_python_rules(line)
if result:
results.append(result)
else:
result = examine_non_python_rules(line)
if result:
results.append(result)
if results:
results = set(results)
results = compress_testable_files(results)
fh = open(output_file, 'w')
fh.writelines("%s\n" % l for l in results)
fh.close()
def compress_testable_files(files):
lines = sorted(files)
for idx, item in enumerate(lines):
file, ext = os.path.splitext(item)
if not ext and not file.endswith('/'):
item += '/'
tmp = [x for x in lines if item in x and item != x]
for _ in tmp:
lines.remove(_)
return lines
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c','- | -commit', help='Git commit to check', required=True)
args = parser.parse_args()
for product in ['iworkflow', 'bigip', 'bigiq']:
d | etermine_files_to_test(product, args.commit)
|
SpectraLogic/ds3_python3_sdk | tests/helpersTests.py | Python | apache-2.0 | 12,900 | 0.003953 | # Copyright 2021 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import unittest
import os
import tempfile
import uuid
import concurrent.futures
from ds3 import ds3
from ds3 import ds3Helpers
from typing import List, Dict
import xml.etree.ElementTree as xmlDom
def create_files_in_directory(directory: str, num_files: int, root_dir: str,
include_dirs: bool = True) -> List[ds3Helpers.HelperPutObject]:
put_objects = []
# create the directory if it doesn't exist
if not os.path.exists(directory):
os.mkdir(path=directory)
if include_dirs:
obj_name = ds3Helpers.file_path_to_object_store_name(os.path.join(os.path.relpath(directory, root_dir), ""))
put_objects.append(ds3Helpers.HelperPutObject(object_name=obj_name, file_path=directory, size=0))
# create an empty subdirectory
if include_dirs:
dir_path = os.path.join(directory, 'empty-dir')
os.mkdir(path=dir_path)
obj_name = ds3Helpers.file_path_to_object_store_name(os.path.join(os.path.relpath(dir_path, root_dir), ""))
put_objects.append(ds3Helpers.HelperPutObject(object_name=obj_name, file_path=directory, size=0))
# create some files
for i in range(num_files):
file_path = os.path.join(directory, f'file-{i}.txt')
f = open(file_path, "a")
f.write(f'I am file number {i}')
f.close()
obj_name = ds3Helpers.file_path_to_object_store_name(os.path.relpath(file_path, root_dir))
size = os.path.getsize(file_path)
put_objects.append(ds3Helpers.HelperPutObject(object_name=obj_name, file_path=file_path, size=size))
return put_objects
class Ds3HelpersTestCase(unittest.TestCase):
def test_file_path_to_object_store_name(self):
self.assertEqual(ds3Helpers.file_path_to_object_store_name(os.path.join("some", "dir", "")), 'some/dir/')
self.assertEqual(ds3Helpers.file_path_to_object_store_name(os.path.join("some", "file")), 'some/file')
def test_marshaling_put_object_list(self):
dir_obj = ds3.Ds3PutObject(name="dir-0/", size=0)
object_list: List[ds3.Ds3PutObject] = [dir_obj]
xml_object_list = ds3.Ds3PutObjectList(object_list)
to_xml = xml_object_list.to_xml()
result = xmlDom.tostring(to_xml)
self.assertEqual(result, b'<Objects><Object Name="dir-0/" Size="0" /></Objects>')
@staticmethod
def write_to_stream(i: int, char: str, get_object: ds3Helpers.HelperGetObject):
offset = i * 10
content = ''
for j in range(10):
content += char
stream = get_object.get_data_stream(offset)
stream.write(bytes(content, 'utf-8'))
stream.close()
def test_get_object_data_stream(self):
directory = tempfile.TemporaryDirectory(prefix="ds3-python3-sdk-")
file_path = os.path.join(directory.name, "sub-dir", "file.txt")
get_object = ds3Helpers.HelperGetObject(object_name="file.txt", destination_path=file_path)
inputs = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
expected: str = ''
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
for i in range(len(inputs)):
for j in range(10):
expected += inputs[i]
executor.submit(self.write_to_stream, i, inputs[i], get_object)
file = open(file_path)
content = file.read()
self.assertEqual(expected, content)
file.close()
directory.cleanup()
def test_put_and_get_objects(self):
bucket = f'ds3-python3-sdk-test-{uuid.uuid1()}'
# create temporary directory with some files
source = tempfile.TemporaryDirectory(prefix="ds3-python3-sdk-src-")
put_objects = create_files_in_directory(directory=source.name,
num_files=10,
root_dir=source.name,
include_dirs=False)
# create the BP helper and perform the put all objects call
client = ds3.createClientFromEnv()
client.put_bucket_spectra_s3(ds3.PutBucketSpectraS3Request(name=bucket))
helpers = ds3Helpers.Helper(client=client)
job_id = helpers.put_objects(bucket=bucket, put_objects=put_objects)
self.assertNotEqual(jo | b_id, "", "job id was returned")
# verify all the files | and directories are on the BP
head_obj = client.head_object(ds3.HeadObjectRequest(bucket_name=bucket, object_name="does-not-exist"))
self.assertEqual(head_obj.result, "DOESNTEXIST")
for put_object in put_objects:
head_obj = client.head_object(ds3.HeadObjectRequest(bucket_name=bucket, object_name=put_object.object_name))
self.assertNotEqual(head_obj.result, "DOESNTEXIST")
# retrieve the files from the BP
destination = tempfile.TemporaryDirectory(prefix="ds3-python3-sdk-dst-")
get_objects: List[ds3Helpers.HelperGetObject] = []
object_name_to_source: Dict[str, str] = dict()
for put_object in put_objects:
destination_path = os.path.join(destination.name, put_object.object_name)
get_objects.append(
ds3Helpers.HelperGetObject(object_name=put_object.object_name, destination_path=destination_path))
object_name_to_source[put_object.object_name] = put_object.file_path
# perform the get objects call
job_id = helpers.get_objects(bucket=bucket, get_objects=get_objects)
self.assertNotEqual(job_id, "", "job id was returned")
for get_object in get_objects:
original_file = open(object_name_to_source[get_object.object_name], 'rb')
retrieved_file = open(get_object.destination_path, 'rb')
original_content = original_file.read()
retrieved_content = retrieved_file.read()
self.assertEqual(original_content, retrieved_content)
original_file.close()
retrieved_file.close()
# cleanup
source.cleanup()
destination.cleanup()
client.delete_bucket_spectra_s3(ds3.DeleteBucketSpectraS3Request(bucket_name=bucket, force=True))
def test_put_and_get_all_objects_in_directory(self):
bucket = f'ds3-python3-sdk-test-{uuid.uuid1()}'
# create temporary directory with some files and subdirectories
source = tempfile.TemporaryDirectory(prefix="ds3-python3-sdk-src-")
put_objects = create_files_in_directory(directory=source.name, num_files=5, root_dir=source.name)
for i in range(2):
sub_dir_path = os.path.join(source.name, f'dir-{i}')
put_objects += create_files_in_directory(directory=sub_dir_path, num_files=2, root_dir=source.name)
for j in range(2):
sub_sub_dir_path = os.path.join(sub_dir_path, f'sub-dir-{j}')
put_objects += create_files_in_directory(directory=sub_sub_dir_path,
num_files=2,
root_dir=source.name)
# create the BP helper and perform the put all objects call
client = ds3.createClientFromEnv()
client.put_bucket(ds3.PutBucketRequest(bucket_name=bucket))
helpers = ds3Helpers.Helper(client=client)
job_ids = helpers.put_all_objects_in_directory(source_dir=source.name, bucket=bucket, objects_per_bp_job=10)
self.assertGreaterEqual(len(job_ids), 1, "received at least one job id")
# verify all the files and directories are on t |
HatPull/tornado-elasticsearch-proxy | es_proxy/tests/constants.py | Python | apache-2.0 | 648 | 0 |
SAMPLE_POLICIES | = [
{
'resources': {
'indices': ['kibana-int', ],
},
'users': ['*'],
'permissions': ['index_write', 'index_read']
},
{
'resources': {
'cluster': True
| },
'users': ['alan'],
'permissions': ['kibana_admin', ]
},
{
'resources': {
'indices': ['joes_index', ],
},
'users': ['joe'],
'permissions': ['index_write', 'index_read']
},
{
'resources': {
'indices': ['*', ],
},
'users': ['auditor', ],
'permissions': ['index_read']
},
]
|
stackforge/python-openstacksdk | openstack/identity/v3/service.py | Python | apache-2.0 | 1,798 | 0 | # Licensed under the Apache License, Version 2.0 (the "L | icense"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distrib | uted under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Service(resource.Resource):
resource_key = 'service'
resources_key = 'services'
base_path = '/services'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
commit_method = 'PATCH'
_query_mapping = resource.QueryParameters(
'name',
'type',
)
# Properties
#: User-facing description of the service. *Type: string*
description = resource.Body('description')
#: Setting this value to ``False`` prevents the service and
#: its endpoints from appearing in the service catalog. *Type: bool*
is_enabled = resource.Body('enabled', type=bool)
#: The links for the service resource.
links = resource.Body('links')
#: User-facing name of the service. *Type: string*
name = resource.Body('name')
#: Describes the API implemented by the service. The following values are
#: recognized within the OpenStack ecosystem: ``compute``, ``image``,
#: ``ec2``, ``identity``, ``volume``, ``network``. To support non-core and
#: future projects, the value should not be validated against this list.
#: *Type: string*
type = resource.Body('type')
|
steinnymir/RegAscope2017 | test_scripts/GUI_test/PyQt5 Examples/inputDialogs.py | Python | mit | 1,518 | 0.011199 | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit
from PyQt5.QtGui import QIcon
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 input dialogs - python | spot.com'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def init | UI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.getInteger()
self.getText()
self.getDouble()
self.getChoice()
self.show()
def getInteger(self):
i, okPressed = QInputDialog.getInt(self, "Get integer","Percentage:", 28, 0, 100, 1)
if okPressed:
print(i)
def getDouble(self):
d, okPressed = QInputDialog.getDouble(self, "Get double","Value:", 10.50, 0, 100, 10)
if okPressed:
print( d)
def getChoice(self):
items = ("Red","Blue","Green")
item, okPressed = QInputDialog.getItem(self, "Get item","Color:", items, 0, False)
if ok and item:
print(item)
def getText(self):
text, okPressed = QInputDialog.getText(self, "Get text","Your name:", QLineEdit.Normal, "")
if okPressed and text != '':
print(text)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
boxeehacks/boxeehack | hack/boxee/skin/boxee/720p/scripts/boxeehack_settings.py | Python | mit | 14,393 | 0.008893 | import os
import xbmc, xbmcgui, mc
import ConfigParser
import common
available_providers = ['Addic7ed', 'BierDopje', 'OpenSubtitles', 'SubsWiki', 'Subtitulos', 'Undertexter']
# Set some default values for the subtitles handling
def register_defaults():
subtitle_provider("get", "default")
subtitle_provider("get", "tv")
subtitle_provider("get", "movie")
common.set_string("subtitles-plugin-language", get_subtitles_language_filter() )
common.set_string("subtitles-plugin", get_subtitles_enabled() )
common.set_string("featured-feed", get_featured_feed() )
common.set_string("featured-name", get_featured_name() )
common.set_string("browser-homepage", "".join(get_browser_homepage().split("http://")) )
if not os.path.exists("/data/etc/.subtitles"):
common.file_put_contents("/data/etc/.subtitles", """[DEFAULT]
lang = All
movieplugins = OpenSubtitles,Undertexter
tvplugins = BierDopje,OpenSubtitles,Addic7ed,Subtitulos,SubsWiki,Undertexter
plugins = BierDopje,OpenSubtitles,Subtitulos,SubsWiki,Addic7ed,Undertexter
[BierDopje]
key = C2FAFCBE34610608
""")
set_home_enabled_strings()
version_local = get_local_version()
if version_local != "":
common.set_string("boxeeplus-version", version_local )
def get_home_enabled_default_list():
return "-,friends|Built-in,watchlater,shows|Built-in,movies|Built-in,music|Built-in,apps,files,web"
def set_home_enabled_strings():
homeitems = get_home_enabled_default_list().split(",")
for item in homeitems:
item = item.split("|")[0]
common.set_string("homeenabled-%s" % item, get_homeenabled(item))
common.set_string("home-%s-replacement" % item, get_homereplacement(item))
def get_jump_to_last_unwatched_value():
jumpenabled = common.file_get_contents("/data/etc/.jump_to_unwatched_enabled")
if jumpenabled == "":
jumpenabled = "0"
return jumpenabled
def toggle_jump_to_last_unwatched():
jumpenabled = get_jump_to_last_unwatched_value()
if jumpenabled == "1":
jumpenabled = "0"
else:
jumpenabled = "1"
common.file_put_contents("/data/etc/.jump_to_unwatched_enabled", jumpenabled)
common.set_string("jump-to-unwatched", jumpenabled)
def get_homeenabled_value():
homeenabled = common.file_get_contents("/data/etc/.home_enabled")
if homeenabled == "":
homeenabled = get_home_enabled_default_list()
return homeenabled.split("\n")[0]
def get_homereplacement(section):
homeenabled = get_homeenabled_value().split(",")
replacement = ""
for item in homeenabled:
item = item.split("|")
if item[0] == section:
if len(item) > 1:
replacement = item[1]
else:
rep | lacement = "Built-in"
if replacement == "":
replacement = "Off"
return replacement
def get_homeenabled(section):
homeenabled = get_homeenabled_value().split(",")
section = "%s" % section
for item in homeenabled:
item = item.split("|")[0]
if item == section:
return "1"
return "0"
def toggle_homeenabled(section, action):
homeenabled = get_homeenabled_value().split(",")
if section in ["frie | nds","shows","movies","music"]:
if section == "friends":
types = ["Built-in", "Netflix", "Vudu", "Navi-X", "Spotify", "Grooveshark", "Pandora", "BBC iPlayer", "Revision3", "Crunchyroll", "Off"]
if section == "shows":
types = ["Built-in", "BBC iPlayer", "Revision3", "Crunchyroll", "Off"]
if section == "movies":
types = ["Built-in", "Netflix", "Vudu", "Navi-X", "Off"]
if section == "music":
types = ["Built-in", "Spotify", "Grooveshark", "Pandora", "Off"]
replacement = get_homereplacement(section)
for item in homeenabled:
itemname = item.split("|")[0]
if itemname == section:
homeenabled.remove(item)
pos = types.index(replacement)
if action == "next":
pos = pos + 1
if action == "previous":
pos = pos - 1
if pos >= len(types):
pos = 0
if pos < 0:
pos = len(types) - 1
if types[pos] != "Off":
homeenabled.append("%s|%s" % (section, types[pos]))
else:
found = 0
for item in homeenabled:
itemname = item.split("|")[0]
if itemname == section:
homeenabled.remove(item)
found = 1
if found == 0:
homeenabled.append(section)
common.file_put_contents("/data/etc/.home_enabled", ",".join(homeenabled))
set_home_enabled_strings()
def get_browser_homepage():
homepage = common.file_get_contents("/data/etc/.browser_homepage")
if homepage == "":
homepage = "http://www.myfav.es/boxee"
return homepage
def set_browser_homepage():
homepage = get_browser_homepage()
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault(homepage)
kb.setHeading('Enter homepage URL') # optional
kb.setHiddenInput(False) # optional
kb.doModal()
if kb.isConfirmed():
homepage = kb.getText()
common.file_put_contents("/data/etc/.browser_homepage", homepage)
template = common.file_get_contents("/data/hack/apps/browser2/template.xml")
template = homepage.join(template.split("$URL$"))
common.file_put_contents("/data/hack/apps/browser2/descriptor.xml", template)
os.system("sh /data/hack/apps.sh")
common.set_string("browser-homepage", "".join(get_browser_homepage().split("http://")) )
# Set the password for the telnet functionality
def set_telnet_password():
passwd = common.file_get_contents("/data/etc/passwd")
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault(passwd) # optional
kb.setHeading('Enter telnet password') # optional
kb.setHiddenInput(True) # optional
kb.doModal()
if kb.isConfirmed():
passwd = kb.getText()
if passwd == "":
dialog = xbmcgui.Dialog()
ok = dialog.ok('Telnet', 'The telnet password must not be empty.')
else:
common.file_put_contents("/data/etc/passwd", passwd)
# Determine whether subtitle functionality is enabled/enabled
def get_subtitles_enabled():
subtitles = common.file_get_contents("/data/etc/.subtitles_enabled")
if subtitles == "":
subtitles = "0"
return subtitles
def get_subtitles_language_filter():
config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
if os.path.exists("/data/etc/.subtitles"):
config.read("/data/etc/.subtitles")
langs_config = config.get("DEFAULT", "lang")
if(langs_config.strip() == "" or langs_config.strip() == "All"):
return "0"
else:
return "1"
def featured_next():
replace = get_featured_feed_value()
num = int(replace) + 1
if num > 4: num = 0
replace = "%s" % num
common.file_put_contents("/data/etc/.replace_featured_enabled", replace)
common.set_string("featured-feed", get_featured_feed() )
common.set_string("featured-name", get_featured_name() )
def featured_previous():
replace = get_featured_feed_value()
num = int(replace) - 1
if num < 0: num = 4
replace = "%s" % num
common.file_put_contents("/data/etc/.replace_featured_enabled", replace)
common.set_string("featured-feed", get_featured_feed() )
common.set_string("featured-name", get_featured_name() )
def get_featured_feed():
replace = get_featured_feed_value()
feed = "feed://featured/?limit=15"
if replace == "1": feed = "boxeedb://recent/?limit=15"
if replace == "2": feed = "rss://vimeo.com/channels/staffpicks/videos/rss"
if replace == "3": feed = "rss://gdata.youtube.com/feeds/api/standardfeeds/recently_featured?alt=rss"
if replace == "4": feed = "about:blank"
return feed
def g |
desion/tidy_page | tidypage/extractor.py | Python | mit | 13,177 | 0.004707 | #-*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
import logging
import sys
import re
from .cleaners import clean_tag
from .cleaners import clean_spam
import gzip
log = logging.getLogger("tidypage.extractor")
TEXT_TAG_COLLECTION = {"p":5, "span":4, "font":3, "i":2, "b":1, "pre": 1}
REGEXES = {
'positiveRe': re.compile('article|body|content|entry|hentry|main|page|pagination|post|text|blog|story', re.I),
'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget|recommend|clearfix', re.I),
}
class Document:
"""Class to build a dom tree from html."""
def __init__(self, html,isForeign = True, url=None):
"""Generate the document
:param html: string of the html content.
:param url: url of the html
"""
self.url = url
self.html = html
self.link_num = 0
self.link_text_len = 0
self.total_text_tag_num = 0
self.total_text_len = 0
self.text_tag_num = 0
self.text_tag_text_len = 0
self.is_foreign = isForeign
self.doc = self._parse(self.html)
clean_tag(self.doc)
def _parse(self, html):
soup = BeautifulSoup(html, "lxml")
return soup
def html_title(self):
"""Returns document title"""
return self.doc.title.string
def prettify(self):
"""Returns prettify document"""
return self.doc.prettify("utf-8")
def get_dom(self):
return self.doc
def content(self):
"""get the content of html page"""
clean_spam(self.doc)
candidates = self.get_candidates()
best_node = self.best_candidates(candidates)
if best_node:
return self.purify(best_node["elem"])
else:
return None
def is_index_page(self):
"""estimate the page is index page or not"""
link_density = self.get_link_tag_density(self.doc)
mean_text_block = float(self.text_tag_text_len) / max(self.text_tag_num, 1)
if link_density > 0.45:
return True
elif link_density > 0.30:
"""the foreign language page is different from chinese page"""
if self.is_foreign and mean_text_block < 30:
return True
elif not self.is_foreign and mean_text_block < 20:
return True
else:
return False
else:
return False
def walk(self):
"""walk the dom tree and get the info of the page"""
g = self.doc.recursiveChildGenerator()
while True:
try:
tag = g.next()
if not isinstance(tag,unicode):
if tag.name == "a" and ((self.is_foreign and len(tag.text) > 10) or (not self.is_foreign and len(tag.text) > 4)):
self.link_num += 1
self.link_text_len += len(tag.getText())
elif TEXT_TAG_COLLECTION.has_key(tag.name):
tag_text = tag.contents[0] if len(tag.contents) > 0 and isinstance (tag.contents[0], unicode) else ""
if len(tag_text) > 0:
self.text_tag_num += 1
self.text_tag_text_len += len(tag_text)
else:
self.total_text_len += len(tag)
except StopIteration:
break
def content_block_len(self):
block_size = 3
block_array = map(len, self.doc.strings)
block_set = []
for i in range(0, len(block_array) - block_size):
block_text_len = 0
for j in range(i, i + block_size):
block_text_len += (block_array[j] - 1)
block_set.append(block_text_len)
blk_num = len(block_set)
start = -1
end = -1
max_text_len = 0
cur_text_len = 0
i = 0
while i < blk_num:
if block_set[i] == 0:
if cur_text_len > max_text_len:
max_text_len = cur_text_len
start = tmp
end = i | - 1
cur_text_len = 0
i += 1
continue
if cur_text_len == 0:
tmp = i
cur_text_len += block_set[i]
i += 1
def text_weight(self, elem):
content_score | = 1
long_text_line = 0
block_size = 3
inner_text = ""
for string in elem.stripped_strings:
if (self.is_foreign and len(string) > 100) or (not self.is_foreign and len(string) > 50):
long_text_line += 1
inner_text += string
else:
inner_text += string
"""for punch"""
if len(inner_text) > 0:
if self.is_foreign:
splits = re.split(u",|\.|\?", inner_text)
content_score += len(splits)
else:
splits = re.split(u"|,|。|?", inner_text)
content_score += len(splits)
"""for text len"""
if self.is_foreign:
content_score += min((len(inner_text) / 100), 5)
else:
content_score += min((len(inner_text) / 20), 5)
"""for text block"""
block_array = map(len, elem.strings)
block_set = []
for i in range(0, len(block_array) - block_size):
block_text_len = 0
for j in range(i, i + block_size):
block_text_len += (block_array[j] - 1)
block_set.append(block_text_len)
short_block = 0
blk_text_len = 0
for block in block_set:
blk_text_len += block
if (self.is_foreign and block < 50) or (not self.is_foreign and block < 10):
short_block += 1
short_block_ratio = float(short_block) / max(len(block_set), 1)
if short_block_ratio > 0.3:
content_score -= 10
return content_score
def class_weight(self, elem):
weight = 0
for feature in [elem.get('class', None), elem.get('id', None)]:
try:
if feature:
if REGEXES['negativeRe'].search(feature):
weight -= 25
if REGEXES['positiveRe'].search(feature):
weight += 25
except:
continue
return weight
def node_weight(self, elem):
content_score = 0
name = elem.name
if name == "div":
content_score += 5
elif name in ["pre", "td", "blockquote"]:
content_score += 3
elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]:
content_score -= 3
elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]:
content_score -= 5
return content_score
def score_node(self, elem):
class_score = self.class_weight(elem)
node_score = self.node_weight(elem)
content_score = class_score + node_score
return {
'score': content_score,
'elem': elem
}
def get_candidates(self):
candidates = {}
g = self.doc.recursiveChildGenerator()
while True:
try:
tag = g.next()
#text node
if isinstance(tag,unicode):
if (self.is_foreign and len(tag) > 40) or (not self.is_foreign and len(tag) > 20):
text_tag = tag.parent
if text_tag is None:
continue
if text_tag.name not in ["p", "span", "td", "pre", "i", "b"]:
continue
parent_node = text_tag.parent
if parent_node is not None and parent_node not in candidates:
candidates[parent_node] = self.score_node(parent_node)
candidates[parent_node]['score'] += self.text_weight(parent_node)
|
maoy/zknova | nova/scheduler/filters/io_ops_filter.py | Python | apache-2.0 | 1,650 | 0.001212 | # Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of t | he License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either expr | ess or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
help="Ignore hosts that have too many builds/resizes/snaps/migrations")
CONF = cfg.CONF
CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
"""Filter out hosts with too many concurrent I/O operations."""
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
compute node statistics to decide whether to filter.
"""
num_io_ops = host_state.num_io_ops
max_io_ops = CONF.max_io_ops_per_host
passes = num_io_ops < max_io_ops
if not passes:
LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host "
"is set to %(max_io_ops)s"), locals())
return passes
|
Distrotech/bzr | tools/package_mf.py | Python | gpl-2.0 | 2,892 | 0.000346 | # Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Custom module finder for entire package"""
import modulefinder
import os
import sys
class CustomModuleFinder(modulefinder.ModuleFinder):
"""Custom module finder for processing python packages,
e.g. bzr plugins packages.
:param path: list of directories to search for modules;
| if not s | pecified, python standard library only is used.
"""
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = [os.path.dirname(os.__file__)] # only python std lib
modulefinder.ModuleFinder.__init__(self, path, debug, excludes,
replace_paths)
def run_package(self, package_path):
"""Recursively process each module in package with run_script method.
:param package_path: path to package directory.
"""
stack = [package_path]
while stack:
curdir = stack.pop(0)
py = os.listdir(curdir)
for i in py:
full = os.path.join(curdir, i)
if os.path.isdir(full):
init = os.path.join(full, '__init__.py')
if os.path.isfile(init):
stack.append(full)
continue
if not i.endswith('.py'):
continue
if i == 'setup.py': # skip
continue
self.run_script(full)
def get_result(self):
"""Return 2-tuple: (list of packages, list of modules)"""
keys = self.modules.keys()
keys.sort()
mods = []
packs = []
for key in keys:
m = self.modules[key]
if not m.__file__: # skip builtins
continue
if m.__path__:
packs.append(key)
elif key != '__main__':
mods.append(key)
return (packs, mods)
if __name__ == '__main__':
package = sys.argv[1]
mf = CustomModuleFinder()
mf.run_package(package)
packs, mods = mf.get_result()
print 'Packages:'
print packs
print 'Modules:'
print mods
|
chrisspen/django-feeds | djangofeeds/admin.py | Python | bsd-2-clause | 7,712 | 0.006743 | from datetime import date, timedelta
from django.db.models import Q
from django.contrib import admin
from django.contrib.admin import FieldListFilter, ListFilter, SimpleListFilter
from django.utils.translation import (
ungettext,
ugettext_lazy as _
)
from django.http import HttpResponse, HttpResponseRedirect
from djangofeeds import conf
from djangofeeds.models import (
Feed, Post, Enclosure, Category,
BlacklistedDomain,
Article, NGram, PostNGram,
ArticleByDomain,
)
from admin_steroids.options import BetterRawIdFieldsModelAdmin, ReadonlyModelAdmin
from admin_steroids.utils import get_admin_changelist_url, view_related_link, view_link
from admin_steroids.filters import NullListFilter
BaseModelAdmin = admin.ModelAdmin
BaseModelAdmin = BetterRawIdFieldsModelAdmin
class FreshStaleListFilter(SimpleListFilter):
title = 'Freshness'
parameter_name = 'freshness'
default_value = None
def __init__(self, request, params, model, model_admin):
self.parameter_val = None
try:
self.parameter_val = request.GET.get(self.parameter_name, self.default_value)
if self.parameter_val is not None:
if self.parameter_val in (True, 'True', 1, '1'):
self.parameter_val = True
else:
self.parameter_val = False
except Exception, e:
pass
super(FreshStaleListFilter, self).__init__(request, params, model, model_admin)
def lookups(self, request, model_admin):
"""
Must be overriden to return a list of tuples (value, verbose value)
"""
return [
(None, _('All')),
(True, _('Fresh')), # default
(False, _('Stale')), # default
]
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.parameter_val == lookup,
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
if self.parameter_val is True:
# Only fresh.
queryset = Feed.objects.get_fresh(qs=queryset)
elif self.parameter_val is False:
# Only stale.
queryset = Feed.objects.get_stale(qs=queryset)
return queryset
class FeedAdmin(BaseModelAdmin):
"""Admin for :class:`djangofeeds.models.Feed`."""
list_display = (
'name',
'feed_url',
'date_created',
'date_last_refresh',
'is_active',
'fresh',
'post_link',
)
list_filter = (
'is_active',
FreshSt | aleListFilter,
)
search_fields = ['feed_url', 'name']
readonly_fields = (
'post_link',
'fresh',
| )
def lookup_allowed(self, *args, **kwargs):
if conf.ALLOW_ADMIN_FEED_LOOKUPS:
return True
return super(FeedAdmin, self).lookup_allowed(*args, **kwargs)
def post_link(self, obj=''):
try:
if not obj or not obj.id or not get_admin_changelist_url:
return ''
url = get_admin_changelist_url(Post) + ('?feed__id=%i' % obj.id)
count = obj.post_set.all().count()
return '<a href="%s" target="_blank"><input type="button" value="View %i" /></a>' % (url, count)
except Exception, e:
return str(e)
post_link.short_description = 'Posts'
post_link.allow_tags = True
class PostAdmin(BaseModelAdmin):
"""Admin for :class:`djangofeeds.models.Post`."""
list_display = (
'id',
'feed',
'title',
'link',
'author',
'date_updated',
'date_published',
'has_article',
)
raw_id_fields = (
'feed',
)
list_display_links = (
'feed',
'title',
)
list_filter = [
'article_content_error_code',
'article_content_success',
'article_ngrams_extracted',
]
search_fields = ['link', 'title']
date_hierarchy = 'date_updated'
readonly_fields = (
'has_article',
#'ngrams_link',
'article_ngrams_extracted',
'article_ngram_counts',
'article_ngrams_extracted_datetime',
)
actions = (
'reset_article_success',
)
def ngrams_link(self, obj=None):
if not obj:
return ''
return view_related_link(obj, 'ngrams')
ngrams_link.short_description = 'ngrams'
ngrams_link.allow_tags = True
def reset_article_success(self, request, queryset):
queryset.update(article_content_success=None)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
reset_article_success.short_description = 'Reset article content success flag on selected %(verbose_name_plural)s'
def lookup_allowed(self, *args, **kwargs):
if conf.ALLOW_ADMIN_FEED_LOOKUPS:
return True
return super(PostAdmin, self).lookup_allowed(*args, **kwargs)
def has_article(self, obj=None):
if not obj:
return ''
return bool(len((obj.article_content or '').strip()))
has_article.boolean = True
class BlacklistedDomainAdmin(BaseModelAdmin):
list_display = (
'domain',
'created',
)
search_fields = (
'domain',
)
readonly_fields = (
'created',
)
class ArticleAdmin(ReadonlyModelAdmin):
list_display = (
'id',
'year',
'month',
'total',
'has_article',
'ratio_extracted',
'mean_length',
)
class ArticleByDomainAdmin(ReadonlyModelAdmin):
list_display = (
'year',
'month',
'domain',
'total',
'missing',
'missing_without_error',
'missing_ratio',
'missing_without_error_ratio',
)
def get_queryset(self, request):
qs = super(ArticleByDomainAdmin, self).get_queryset(request)
today = date.today()
last_month = today - timedelta(days=30)
qs = qs.filter(
Q(year=today.year, month=today.month)|\
Q(year=last_month.year, month=last_month.month))
return qs
class NGramAdmin(BaseModelAdmin):
list_display = (
'text',
'n',
)
list_filter = (
'n',
)
search_fields = (
'text',
)
readonly_fields = (
'text',
'n',
)
class PostNGramAdmin(BaseModelAdmin):
list_display = (
'post',
'ngram',
'count',
)
search_fields = (
'ngram__text',
)
readonly_fields = (
'post',
'post_link',
'ngram',
'count',
)
fields = (
'post_link',
'ngram',
'count',
)
def post_link(self, obj=None):
if not obj:
return ''
return view_link(obj.post)
post_link.short_description = 'post'
post_link.allow_tags = True
if NullListFilter:
PostAdmin.list_filter.append(('article_content', NullListFilter))
admin.site.register(Category)
admin.site.register(Enclosure)
admin.site.register(Feed, FeedAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(BlacklistedDomain, BlacklistedDomainAdmin)
admin.site.register(Article, ArticleAdmin)
admin.site.register(ArticleByDomain, ArticleByDomainAdmin)
#admin.site.register(NGram, NGramAdmin)
#admin.site.register(PostNGram, PostNGramAdmin)
|
jeremiahyan/odoo | addons/link_tracker/models/mail_render_mixin.py | Python | gpl-3.0 | 3,440 | 0.00436 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import markupsafe
from werkzeug import urls, utils
from odoo import api, models, tools
class MailRenderMixin(models.AbstractModel):
_inherit = "mail.render.mixin"
# ------------------------------------------------------------
# TOOLS
# ------------------------------------------------------------
@api.model
def _shorten_links(self, html, link_tracker_vals, blacklist=None, base_url=None):
""" Shorten links in an html content. It uses the '/r' short URL routing
introduced in this module. Using the standard Odoo regex local links are
found and replaced by global URLs (not including mailto, tel, sms).
TDE FIXME: could be great to have a record to enable website-based URLs
:param link_tracker_vals: values given to the created link.tracker, containing
for example: campaign_id, medium_id, source_id, and any other relevant fields
like mass_mailing_id in mass_mailing;
:param list blacklist: list of (local) URLs to not shorten (e.g.
'/unsubscribe_from_list')
:param str base_url: either given, either based on config parameter
:return: updated html
"""
base_url = base_url or self.env['ir.config_parameter'].sudo().get_param('web.base.url')
short_schema = base_url + '/r/'
for match in | re.findall(tools.HTML_TAG_URL_REGEX, html):
href = markupsafe.Markup(match[0])
long_url = match[1]
label = (match[3] or '').strip()
if not blacklist or not [s for s in blacklist if s in long_url] and not long_url.startswith(short_schema) | :
create_vals = dict(link_tracker_vals, url=utils.unescape(long_url), label=utils.unescape(label))
link = self.env['link.tracker'].search_or_create(create_vals)
if link.short_url:
new_href = href.replace(long_url, link.short_url)
html = html.replace(href, new_href)
return html
@api.model
def _shorten_links_text(self, content, link_tracker_vals, blacklist=None, base_url=None):
""" Shorten links in a string content. Works like ``_shorten_links`` but
targetting string content, not html.
:return: updated content
"""
if not content:
return content
base_url = base_url or self.env['ir.config_parameter'].sudo().get_param('web.base.url')
shortened_schema = base_url + '/r/'
unsubscribe_schema = base_url + '/sms/'
for original_url in re.findall(tools.TEXT_URL_REGEX, content):
# don't shorten already-shortened links or links towards unsubscribe page
if original_url.startswith(shortened_schema) or original_url.startswith(unsubscribe_schema):
continue
# support blacklist items in path, like /u/
parsed = urls.url_parse(original_url, scheme='http')
if blacklist and any(item in parsed.path for item in blacklist):
continue
create_vals = dict(link_tracker_vals, url= utils.unescape(original_url))
link = self.env['link.tracker'].search_or_create(create_vals)
if link.short_url:
content = content.replace(original_url, link.short_url, 1)
return content
|
dsm054/pandas | pandas/core/resample.py | Python | bsd-3-clause | 67,558 | 0.000622 | from __future__ import annotations
import copy
from datetime import timedelta
from textwrap import dedent
from typing import (
Callable,
Hashable,
Literal,
final,
no_type_check,
)
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import (
BaseOffset,
IncompatibleFrequency,
NaT,
Period,
Timedelta,
Timestamp,
to_offset,
)
from pandas._typing import (
FrameOrSeries,
IndexLabel,
T,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
import pandas.core.algorithms as algos
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import (
DataError,
PandasObject,
)
import pandas.core.common as com
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.groupby.generic import SeriesGroupBy
from pandas.core.groupby.groupby import (
BaseGroupBy,
GroupBy,
_pipe_template,
get_groupby,
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.core.indexes.api import Index
from pandas.core.indexes.datetimes import (
DatetimeIndex,
date_range,
)
from pandas.core.indexes.period import (
PeriodIndex,
period_range,
)
from pandas.core.indexes.timedeltas import (
TimedeltaIndex,
timedelta_range,
)
from pandas.tseries.frequencies import (
is_subperiod,
is_superperiod,
)
from pandas.tseries.offsets import (
DateOffset,
Day,
Nano,
Tick,
)
_shared_docs_kwargs: dict[str, str] = {}
class Resampler(BaseGroupBy, PandasObject):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : Series or DataFrame
groupby : TimeGrouper
axis : int, default 0
kind : str or None
'period', 'timestamp' to override default index treatment
Returns
-------
a Resampler of the appropriate type
Notes
-----
After resampling, see aggregate, apply, and transform functions.
"""
grouper: BinGrouper
exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat
# to the groupby descriptor
_attributes = [
"freq",
"axis",
"closed",
"label",
"convention",
"loffset",
"kind",
"origin",
"offset",
]
def __init__(
self,
obj: FrameOrSeries,
groupby: TimeGrouper,
axis: int = 0,
kind=None,
*,
selection=None,
**kwargs,
):
self.groupby = groupby
self.keys = None
self.sort = True
self.axis = axis
self.kind = kind
self.squeeze = False
self.group_keys = True
self.as_index = True
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
self.binner, self.grouper = self._get_binner()
self._selection = selection
@final
def _shallow_copy(self, obj, **kwargs):
"""
return a new object with the replacement attributes
"""
if isinstance(obj, self._constructor):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return self._constructor(obj, **kwargs)
def __str__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
f"{k}={getattr(self.groupby, k)}"
for k in self._attributes
if getattr(self.groupby, k, None) is not None
)
return f"{type(self).__name__} [{', '.join(attrs)}]"
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self.groupby, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
# error: Signature of "obj" incompatible with supertype "BaseGroupBy"
@property
def obj(self) -> FrameOrSeries: # type: ignore[override]
# error: Incompatible return value type (got "Optional[Any]",
# expected "FrameOrSeries")
return self.groupby.obj # type: ignore[return-value]
@property
def ax(self):
# we can infer that this is a PeriodIndex/DatetimeIndex/TimedeltaIndex,
# but skipping annotating bc the overrides overwhelming
return self.groupby.ax
@property
def _from_selection(self) -> bool:
"""
Is the resampling from a DataFrame column or MultiIndex level.
"""
# upsampling and PeriodIndex resampling do not work
# with selection, this state used to catch and raise an error
return self.groupby is not None and (
self.groupby.key is not None or self.groupby.level is not None
)
def _convert_obj(self, obj: FrameOrSeries) -> FrameOrSeries:
"""
Provide any conversions for the object in order to correctly handle.
Parameters
----------
obj : Series or DataFrame
Returns
-------
Series or DataFrame
"""
return obj._consolidate()
| def _get_binner_for_time(self):
| raise AbstractMethodError(self)
@final
def _get_binner(self):
"""
Create the BinGrouper, assume that self.set_grouper(obj)
has already been called.
"""
binner, bins, binlabels = self._get_binner_for_time()
assert len(bins) == len(binlabels)
bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)
return binner, bin_grouper
@Substitution(
klass="Resampler",
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum
value in one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""",
)
@Appender(_pipe_template)
def pipe(
self,
func: Callable[..., T] | tuple[Callable[..., T], str],
*args,
**kwargs,
) -> T:
return super().pipe(func, *args, **kwargs)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
or list of string/callables.
DataFrame.resample.transform : Transforms the Series on each group
based on the given function.
DataFrame.aggregate: Aggregate using one or more
operations over the specified axis.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5],
... index=pd.date_range('20130101', periods=5, freq='s'))
>>> s
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum', 'mean', 'max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result': lambda x: x.mean() / x.std(),
... 'total': np.sum})
result total
2013-01-01 00:00:00 2.12132 |
blitzmann/Pyfa | gui/fitCommands/guiMetaSwap.py | Python | gpl-3.0 | 2,845 | 0.002109 | import wx
from service.fit import Fit
import gui.mainFrame
from gui import globalEvents as GE
from .calc.fitRemoveImplant import FitRemoveImplantCommand
from .calc.fitAddImplant import FitAddImplantCommand
from .calc.fitRemoveBooster import FitRemoveBoosterCommand
from .calc.fitAddBooster import FitAddBoosterCommand
from .calc.fitRemoveCargo import FitRemoveCargoCommand
from .calc.fitAddCargo import FitAddCargoCommand
from .calc.fitReplaceModule import FitReplaceModuleCommand
from .calc.fitAddFighter import FitAddFighterCommand
from .calc.fitRemoveFighter import FitRemoveFighterCommand
from .calc.fitChangeDroneVariation import FitChangeDroneVariationCommand
class GuiMetaSwapCommand(wx.Command):
def __init__(self, fitID, context, itemID, selection: list):
wx.Command.__init__(s | elf, True, "Meta Swap")
self. | mainFrame = gui.mainFrame.MainFrame.getInstance()
self.sFit = Fit.getInstance()
self.internal_history = wx.CommandProcessor()
self.fitID = fitID
self.itemID = itemID
self.context = context
self.data = []
fit = self.sFit.getFit(fitID)
if context == 'fittingModule':
for x in selection:
self.data.append(((FitReplaceModuleCommand, fitID, fit.modules.index(x), itemID),),)
elif context == 'implantItem':
for x in selection:
idx = fit.implants.index(x)
self.data.append(((FitRemoveImplantCommand, fitID, idx), (FitAddImplantCommand, fitID, itemID)))
elif context == 'boosterItem':
for x in selection:
idx = fit.boosters.index(x)
self.data.append(((FitRemoveBoosterCommand, fitID, idx), (FitAddBoosterCommand, fitID, itemID)))
elif context == 'cargoItem':
for x in selection:
self.data.append(((FitRemoveCargoCommand, fitID, x.itemID, 1, True), (FitAddCargoCommand, fitID, itemID, x.amount)))
elif context == 'fighterItem':
for x in selection:
self.data.append(((FitRemoveFighterCommand, fitID, fit.fighters.index(x)), (FitAddFighterCommand, fitID, itemID)))
elif context == 'droneItem':
for x in selection:
self.data.append(((FitChangeDroneVariationCommand, fitID, fit.drones.index(x), itemID),),)
def Do(self):
for cmds in self.data:
for cmd in cmds:
self.internal_history.Submit(cmd[0](*cmd[1:]))
self.sFit.recalc(self.fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
return True
def Undo(self):
for _ in self.internal_history.Commands:
self.internal_history.Undo()
self.sFit.recalc(self.fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
return True
|
nickmckay/LiPD-utilities | Python/lipd/dataframes.py | Python | gpl-2.0 | 12,355 | 0.002266 | import collections
import pandas as pd
from .regexes import re_pandas_x_num
from .loggers import create_logger
from .alternates import DATA_FRAMES
from .misc import unwrap_arrays, match_arr_lengths
logger_dataframes = create_logger("PDSlib")
def _dotnotation_for_nested_dictionary(d, key, dots):
"""
Flattens nested data structures using dot notation.
:param dict d: Original or nested dictionary
:param str key:
:param dict dots: Dotted dictionary so far
:return dict: Dotted dictionary so far
"""
if key == 'chronData':
# Not interested in expanding chronData in dot notatio | n. Keep it as a chunk.
dots[key] = d
elif isinstance(d, dict):
for k in d:
_dotnotation_for_nested_dictionary(d[k], key + '.' + k if key else k, dots)
elif isinstance(d, list) and \
not all(isinstance(item, (int, float | , complex, list)) for item in d):
for n, d in enumerate(d):
_dotnotation_for_nested_dictionary(d, key + '.' + str(n) if key != "" else key, dots)
else:
dots[key] = d
return dots
def create_dataframe(ensemble):
"""
Create a data frame from given nested lists of ensemble data
:param list ensemble: Ensemble data
:return obj: Dataframe
"""
logger_dataframes.info("enter ens_to_df")
# "Flatten" the nested lists. Bring all nested lists up to top-level. Output looks like [ [1,2], [1,2], ... ]
ll = unwrap_arrays(ensemble)
# Check that list lengths are all equal
valid = match_arr_lengths(ll)
if valid:
# Lists are equal lengths, create the dataframe
df = pd.DataFrame(ll)
else:
# Lists are unequal. Print error and return nothing.
df = "empty"
print("Error: Numpy Array lengths do not match. Cannot create data frame")
logger_dataframes.info("exit ens_to_df")
return df
def lipd_to_df(metadata, csvs):
"""
Create an organized collection of data frames from LiPD data
:param dict metadata: LiPD data
:param dict csvs: Csv data
:return dict: One data frame per table, organized in a dictionary by name
"""
dfs = {}
logger_dataframes.info("enter lipd_to_df")
# Flatten the dictionary, but ignore the chron data items
dict_in_dotted = {}
logger_dataframes.info("enter dot_notation")
_dotnotation_for_nested_dictionary(metadata, '', dict_in_dotted)
dict_in_dotted = collections.OrderedDict(sorted(dict_in_dotted.items()))
# Create one data frame for metadata items
dfs["metadata"] = pd.DataFrame(list(dict_in_dotted.items()), columns=["Key", "Value"])
# Create data frames for paleo data and chron data items. This does not use LiPD data, it uses the csv data
dfs.update(_get_dfs(csvs))
return dfs
def ts_to_df(metadata):
"""
Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name
"""
logger_dataframes.info("enter ts_to_df")
dfs = {}
# Plot the variable + values vs year, age, depth (whichever are available)
dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata))
# Plot the chronology variables + values in a data frame
dfs["chronData"] = _get_key_data(metadata, "chronData_df")
# Take out the chronData pandas data frame object if it exists in the metadata
# Otherwise, the data frame renderer gets crazy and errors out.
if "chronData_df" in metadata:
del metadata["chronData_df"]
s = collections.OrderedDict(sorted(metadata.items()))
# Put key-vars in a data frame to make it easier to visualize
dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value'])
logger_dataframes.info("exit ts_to_df")
return dfs
def _plot_ts_cols(ts):
"""
Get variable + values vs year, age, depth (whichever are available)
:param dict ts: TimeSeries dictionary
:return dict: Key: variableName, Value: Panda Series object
"""
logger_dataframes.info("enter get_ts_cols()")
d = {}
# Not entirely necessary, but this will make the column headers look nicer for the data frame
# The column header will be in format "variableName (units)"
try:
units = " (" + ts["paleoData_units"] + ")"
except KeyError as e:
units = ""
logger_dataframes.warn("get_ts_cols: KeyError: paleoData_units not found, {}".format(e))
try:
d[ts["paleoData_variableName"] + units] = ts["paleoData_values"]
except KeyError as e:
logger_dataframes.warn("get_ts_cols: KeyError: variableName or values not found, {}".format(e))
# Start looking for age, year, depth columns
for k, v in ts.items():
if re_pandas_x_num.match(k):
try:
units = " (" + ts[k + "Units"] + ")"
d[k + units] = v
except KeyError as e:
logger_dataframes.warn("get_ts_cols: KeyError: Special column units, {}, {}".format(k, e))
logger_dataframes.info("exit get_ts_cols: found {}".format(len(d)))
return d
def _get_dfs(csvs):
"""
LiPD Version 1.2
Create a data frame for each table for the given key
:param dict csvs: LiPD metadata dictionary
:return dict: paleo data data frames
"""
logger_dataframes.info("enter get_lipd_cols")
# placeholders for the incoming data frames
dfs = {"chronData": {}, "paleoData": {}}
try:
for filename, cols in csvs.items():
tmp = {}
for var, data in cols.items():
tmp[var] = pd.Series(data["values"])
if "chron" in filename.lower():
dfs["chronData"][filename] = pd.DataFrame(tmp)
elif "paleo" in filename.lower():
dfs["paleoData"][filename] = pd.DataFrame(tmp)
except KeyError:
logger_dataframes.warn("get_lipd_cols: AttributeError: expected type dict, given type {}".format(type(csvs)))
logger_dataframes.info("exit get_lipd_cols")
return dfs
def _get_key_data(d, key):
"""
Generic function to grab dictionary data by key with error handling
:return:
"""
d2 = ""
try:
d2 = d[key]
except KeyError:
logger_dataframes.info("get_key_data: KeyError: {}".format(key))
return d2
def get_filtered_dfs(lib, expr):
"""
Main: Get all data frames that match the given expression
:return dict: Filenames and data frames (filtered)
"""
logger_dataframes.info("enter get_filtered_dfs")
dfs = {}
tt = None
# Process all lipds files or one lipds file?
specific_files = _check_expr_filename(expr)
# Determine the table type wanted
if "chron" in expr:
tt = "chron"
elif "paleo" in expr:
tt = "paleo"
# Get all filenames of target type.
if tt:
if specific_files:
# The user has specified a single LiPD file to get data frames from.
for file in specific_files:
if file in lib:
lo_meta = lib[file].get_metadata()
lo_dfs = lib[file].get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching filenames
filenames = _match_dfs_expr(lo_meta, expr, tt)
# Update our output data frames dictionary
dfs.update(_match_filenames_w_dfs(filenames, lo_dfs))
else:
print("Unable to find LiPD file in Library: {}".format(file))
# Process all LiPD files in the library. A file has not been specified in the expression.
else:
# Loop once on each lipds object in the library
for ln, lo in lib.items():
# Get the
lo_meta = lo.get_metadata()
lo_dfs = lo.get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching fil |
foursquare/pants | src/python/pants/backend/docgen/targets/doc.py | Python | apache-2.0 | 4,885 | 0.007574 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.base.hash_utils import stable_json_hash
from pants.base.payload import Payload
from pants.base.payload_field import PayloadField, PrimitiveField, combine_hashes
from pants.build_graph.target import Target
class WikiArtifact(object):
"""Binds a single documentation page to a wiki instance.
This object allows you to specify which wiki a page should be published to, along with additional
wiki-specific parameters, such as the title, parent page, etc.
"""
def __init__(self, wiki, **kwargs):
"""
:param wiki: target spec of a ``wiki``.
:param kwargs: a dictionary that may contain configuration directives for your particular wiki.
For example, the following keys are supported for Atlassian's Confluence:
* ``space`` -- A wiki space in which to place the page (used in Confluence)
* ``title`` -- A title for the wiki page
* ``parent`` -- The title of a wiki page that will denote this page as a child.
"""
self.wiki = wiki
self.config = kwargs
def fingerprint(self):
return combine_hashes([self.wiki.fingerprint(), stable_json_hash(self.config)])
def __str__(self):
return self.wiki.name
class Wiki(object):
"""Identifies a wiki where pages can be published."""
def __init__(self, name, url_builder):
"""
:param url_builder: Function that accepts a page target and an optional wiki config dict.
"""
self.name = name
self.url_builder = url_builder
def fingerprint(self):
# TODO: url_builder is not a part of fingerprint.
return stable_json_hash(self.name)
class Page(Target):
"""A documentation page.
Here is an example, that shows a markdown page providing a wiki page on an Atlassian Confluence
wiki: ::
page(name='mypage',
source='mypage.md',
provides=[
wiki_artifact(wiki=Wiki('foozle', <url builder>),
space='my_space',
title='my_page',
parent='my_parent'),
],
)
A ``page`` can have more than one ``wiki_artifact`` in its ``provides``
(there might be more than one place to publish it).
"""
class ProvidesTupleField(tuple, PayloadField):
def _compute_fingerprint(self):
return combine_hashes(artifact.fingerprint() for artifact in self)
def __init__(self,
sources,
address=None,
payload=None,
format=None,
links=None,
provides=None,
**kwargs):
"""
:param sources: Page s | ource file. Exactly one will be present.
:param format: Page's format, ``md`` or ``rst``. By default, Pants infers | from ``source`` file
extension: ``.rst`` is ReStructured Text; anything else is Markdown.
:param links: Other ``page`` targets that this `page` links to.
:type links: List of target specs
:param provides: Optional "Addresses" at which this page is published.
E.g., a wiki location.
:type provides: List of ``wiki_artifact``s
"""
payload = payload or Payload()
if not format:
if sources.files[0].lower().endswith('.rst'):
format = 'rst'
else:
format = 'md'
payload.add_fields({
'sources': self.create_sources_field(sources=sources,
sources_rel_path=address.spec_path,
key_arg='sources'),
'format': PrimitiveField(format),
'links': PrimitiveField(links or []),
'provides': self.ProvidesTupleField(provides or []),
})
super(Page, self).__init__(address=address, payload=payload, **kwargs)
if provides and not isinstance(provides[0], WikiArtifact):
raise ValueError('Page must provide a wiki_artifact. Found instead: {}'.format(provides))
@property
def source(self):
"""The first (and only) source listed by this Page."""
return list(self.payload.sources.source_paths)[0]
@classmethod
def compute_injectable_specs(cls, kwargs=None, payload=None):
for spec in super(Page, cls).compute_injectable_specs(kwargs, payload):
yield spec
target_representation = kwargs or payload.as_dict()
for spec in target_representation.get('links', []):
yield spec
@property
def provides(self):
"""A tuple of WikiArtifact instances provided by this Page.
Notably different from JvmTarget.provides, which has only a single Artifact rather than a
list.
"""
return self.payload.provides
@property
def format(self):
"""Returns this page's format, 'md' (Markdown) or 'rst' (ReStructured Text)."""
return self.payload.format
|
HeadsUpDisplayInc/mbed | tools/targets/REALTEK_RTL8195AM.py | Python | apache-2.0 | 10,067 | 0.003675 | """
Realtek Semiconductor Corp.
RTL8195A elf2bin script
"""
import sys, array, struct, os, re, subprocess
import hashlib
import shutil
from tools.paths import TOOLS_BOOTLOADERS
from tools.toolchains import TOOLCHAIN_PATHS
from datetime import datetime
# Constant Variables
RAM2_RSVD = 0x00000000
RAM2_VER = 0x8195FFFF00000000
RAM2_TAG = 0x81950001
RAM2_SHA = '0'
def write_fixed_width_string(value, width, output):
# cut string to list & reverse
line = [value[i:i+2] for i in range(0, len(value), 2)]
output.write("".join([chr(long(b, 16)) for b in line]))
def write_fixed_width_value(value, width, output):
# convert to string
line = format(value, '0%dx' % (width))
if len(line) > width:
print "[ERROR] value 0x%s cannot fit width %d" % (line, width)
sys.exit(-1)
# cut string to list & reverse
line = [line[i:i+2] for i in range(0, len(line), 2)]
line.reverse()
# convert to write buffer
output.write("".join([chr(long(b, 16)) for b in line]))
def append_image_file(image, output):
input = open(image, "rb")
output.write(input.read())
input.close()
def write_padding_bytes(output_name, size):
current_size = os.stat(output_name).st_size
padcount = size - current_size
if padcount < 0:
print "[ERROR] image is larger than expected size"
sys.exit(-1)
output = open(output_name, "ab")
output.write('\377' * padcount)
output.close()
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def get_version_by_time():
secs = int((datetime.now()-datetime(2016,11,1)).total_seconds())
return RAM2_VER + secs
# ----------------------------
# main function
# ----------------------------
def prepend(image, entry, segment, image_ram2, image_ota):
# parse input arguments
output = open(image_ram2, "wb")
write_fixed_width_value(os.stat(image).st_size, 8, output)
write_fixed_width_value(int(entry), 8, output)
write_fixed_width_value(int(segment), 8, output)
RAM2_SHA = sha256_checksum(image)
write_fixed_width_value(RAM2_TAG, 8, output)
write_fixed_width_value(get_version_by_time(), 16, output)
write_fixed_width_string(RAM2_SHA, 64, output)
write_fixed_width_value(RAM2_RSVD, 8, output)
append_image_file(image, output)
output.close()
ota = open(image_ota, "wb")
write_fixed_width_value(os.stat(image).st_size, 8, ota)
write_fixed_width_value(int(entry), 8, ota)
write_fixed_width_value(int(segment), 8, ota)
write_fixed_width_value(0xFFFFFFFF, 8, ota)
write_fixed_width_value(get_version_by_time(), 16, ota)
write_fixed_width_string(RAM2_SHA, 64, ota)
write_fixed_width_value(RAM2_RSVD, 8, ota)
append_image_file(image, ota)
ota.close()
def find_symbol(toolchain, mapfile, symbol):
ret = None
HEX = '0x0{,8}(?P<addr>[0-9A-Fa-f]{8})'
if toolchain == "GCC_ARM":
SYM = re.compile(r'^\s+' + HEX + r'\s+' + symbol + '\r?$')
elif toolchain in ["ARM_STD", "ARM", "ARM_MICRO"]:
SYM = re.compile(r'^\s+' + HEX + r'\s+0x[0-9A-Fa-f]{8}\s+Code.*\s+i\.' + symbol + r'\s+.*$')
elif toolchain == "IAR":
SYM = re.compile(r'^' + symbol + r'\s+' + HEX + '\s+.*$')
with open(mapfile, 'r') as infile:
for line in infile:
match = re.match(SYM, line)
if match:
ret = match.group("addr")
if not ret:
print "[ERROR] cannot find the address of symbol " + symbol
return 0
return int(ret,16) | 1
def parse_load_segment_gcc(image_elf):
# Program Headers:
# Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
# LOAD 0x000034 0x1000 | 6000 0x10006000 0x026bc 0x026bc RW 0x8
# LOAD 0x0026f0 0x30000000 0x30000000 0x06338 0x06338 RWE 0x4
segment_list = []
cmd = os.path.join(TOOLCHAIN_PA | THS['GCC_ARM'], 'arm-none-eabi-readelf')
cmd = '"' + cmd + '"' + ' -l ' + image_elf
for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"):
if not line.startswith(" LOAD"):
continue
segment = line.split()
if len(segment) != 8:
continue
offset = int(segment[1][2:], 16)
addr = int(segment[2][2:], 16)
size = int(segment[4][2:], 16)
if addr != 0 and size != 0:
segment_list.append((offset, addr, size))
return segment_list
def parse_load_segment_armcc(image_elf):
# ====================================
#
# ** Program header #2
#
# Type : PT_LOAD (1)
# File Offset : 52 (0x34)
# Virtual Addr : 0x30000000
# Physical Addr : 0x30000000
# Size in file : 27260 bytes (0x6a7c)
# Size in memory: 42168 bytes (0xa4b8)
# Flags : PF_X + PF_W + PF_R + PF_ARM_ENTRY (0x80000007)
# Alignment : 8
#
(offset, addr, size) = (0, 0, 0)
segment_list = []
in_segment = False
cmd = os.path.join(TOOLCHAIN_PATHS['ARM'], 'bin', 'fromelf')
cmd = '"' + cmd + '"' + ' --text -v --only=none ' + image_elf
for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"):
if line == "":
pass
elif line.startswith("** Program header"):
in_segment = True
elif in_segment == False:
pass
elif line.startswith("============"):
if addr != 0 and size != 0:
segment_list.append((offset, addr, size))
in_segment = False
(offset, addr, size) = (0, 0, 0)
elif line.startswith(" Type"):
if not re.match(r'\s+Type\s+:\s+PT_LOAD\s.*$', line):
in_segment = False
elif line.startswith(" File Offset"):
match = re.match(r'^\s+File Offset\s+:\s+(?P<offset>\d+).*$', line)
if match:
offset = int(match.group("offset"))
elif line.startswith(" Virtual Addr"):
match = re.match(r'^\s+Virtual Addr\s+:\s+0x(?P<addr>[0-9a-f]+).*$', line)
if match:
addr = int(match.group("addr"), 16)
elif line.startswith(" Size in file"):
match = re.match(r'^\s+Size in file\s+:.*\(0x(?P<size>[0-9a-f]+)\).*$', line)
if match:
size = int(match.group("size"), 16)
return segment_list
def parse_load_segment_iar(image_elf):
# SEGMENTS:
#
# Type Offset Virtual Physical File Sz Mem Sz Flags Align
# ---- ------ ------- -------- ------- ------ ----- -----
# 0: load 0x34 0x10006000 0x10006000 0x26bc 0x26bc 0x6 WR 0x8
# 1: load 0x26f0 0x30000000 0x30000000 0x6338 0x6338 0x7 XWR 0x4
#
# SECTIONS:
#
# Name Type Addr Offset Size Aln Lnk Inf ESz Flags
# ---- ---- ---- ------ ---- --- --- --- --- -----
# 1: .shstrtab strtab 0xfc4d8 0x60 0x4
# 2: .strtab strtab 0xfc538 0xbb3f 0x4
segment_list = []
in_segment = False
cmd = os.path.join(TOOLCHAIN_PATHS['IAR'], 'bin', 'ielfdumparm')
cmd = '"' + cmd + '"' + ' ' + image_elf
for line in subprocess.check_output(cmd, shell=True, universal_newlines=True).split("\n"):
if line.startswith(" SEGMENTS:"):
in_segment = True
elif in_segment == False:
pass
elif line.startswith(" SECTIONS:"):
break
elif re.match(r'^\s+\w+:\s+load\s+.*$', line):
segment = line.split()
offset = int(segment[2][2:], 16)
addr = int(segment[3][2:], 16)
size = int(segment[5][2:], 16)
if addr < 0x10007000:
continue
if addr != 0 and size != 0:
segment_list.append((offset, addr, size))
retur |
janchorowski/fuel | fuel/converters/dogs_vs_cats.py | Python | mit | 4,138 | 0 | import os
import zipfile
import h5py
import numpy
from PIL import Image
from fuel.converters.base import check_exists, progress_bar
from fuel.datasets.hdf5 import H5PYDataset
TRAIN = 'dogs_vs_cats.train.zip'
TEST = 'dogs_vs_cats.test1.zip'
@check_exists(required_files=[TRAIN, TEST])
def convert_dogs_vs_cats(directory, output_directory,
output_filename='dogs_vs_cats.hdf5'):
"""Converts the Dogs vs. Cats dataset to HDF5.
Converts the Dogs vs. Cats dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.dogs_vs_cats`. The converted dataset is saved as
'dogs_vs_cats.hdf5'.
It assumes the existence of the following files:
* `dogs_vs_cats.train.zip`
* `dogs_vs_cats.test1.zip`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'dogs_vs_cats.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
# Prepare output file
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
hdf_features = h5file.create_dataset('image_features', (37500,),
dtype=dtype)
hdf_shapes = h5file.create_dataset('image_features_shapes', (37500, 3),
dtype='int32')
hdf_labels = h5file.create_dataset('targets', (37500, 1), dtype='uint8')
# Attach shape annotations and scales
hdf_features.dims.create_scale(hdf_shapes, 'shapes')
hdf_features.dims[0].attach_scale(hdf_shapes)
hdf_shapes_labels = h5file.create_dataset('image_features_shapes_labels',
(3,), dtype='S7')
hdf_shapes_labels[...] = ['channel'.encode('utf8'),
'height'.encode('utf8'),
'width'.encode('utf8')]
hdf_features.dims.create_scale(hdf_shapes_labels, 'shape_labels')
hdf_features.dims[0].attach_scale(hdf_shapes_labels)
# Add axis annotations
hdf_features.dims[0].label = 'batch'
hdf_labels.dims[0].label = 'batch'
hdf_labels.dims[1].label = 'index'
# Convert
i = 0
for split, split_size in zip([TRAIN, TEST], [25000, 12500]):
# Open the ZIP file
filename = os.path.join(directory, split)
zip_file = zipfile.ZipFile(filename, 'r')
image_names = zip_file.namelist()[1:] # Discard the directory name
# Shuffle the examples
rng = numpy.random.RandomState(123522)
rng.shuffle(image_names)
# Convert from JPEG to NumPy arrays
with progress_bar(filename, split_size) as bar:
for image_name in image_names:
# Save image
image = numpy.array(Image.open(zip_file.open(image_name)))
image = image.transpose(2, 0, 1)
hdf_features[i] = image.flatten()
hdf_shapes[i] = image.shape
# Cats are 0, Dogs are 1
hdf_labels[i] = 0 if 'cat' in image_name else 1
# Update progress
i += 1
bar.update(i if split == TRAIN else i - 25000)
# Add the labels
split_dict = {}
sources = ['image_features', 'targets']
for name, slice_ in zip(['train', 'test'],
[(0, 25000), (25000, 37500)]):
split_dict[name] = dict(zip(sources, [slice_] * len(sources)))
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
h5file.flush()
h5file.close()
return (output_path,)
def fill_subparser(subparser):
"""Sets up a subparser to | convert the | dogs_vs_cats dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `dogs_vs_cats` command.
"""
return convert_dogs_vs_cats
|
rahulunair/nova | nova/tests/functional/wsgi/test_secgroup.py | Python | apache-2.0 | 3,421 | 0 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import testscenarios
from nova import test
from nova.tests import fixtures as nova_fixtures
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
LOG = logging.getLogger(__name__)
# TODO(stephenfin): Add InstanceHelperMixin
class SecgroupsFullstack(testscenarios.WithScenarios, test.TestCase):
"""Tests for security groups
TODO: describe security group API
TODO: define scope
"""
REQUIRES_LOCKING = True
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
# This test uses ``testscenarios`` which matrix multiplies the
# test across the scenarios listed below setting the attributes
# in the dictionary on ``self`` for each scenario.
scenarios = [
('v2', {
'api_major_version': 'v2'}),
# test v2.1 base microversion
('v2_1', {
'api_major_version': 'v2.1'}),
]
def setUp(self):
super(SecgroupsFullstack, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture())
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
# TODO(sdague): refactor this method into the API client, we're
# going to use it a lot
def _build_minimal_create_server_request(self, name):
server = {}
image = self.api.get_images()[0]
LOG.info("Image: %s", image)
if self. | _image_ref_parameter in imag | e:
image_href = image[self._image_ref_parameter]
else:
image_href = image['id']
image_href = 'http://fake.server/%s' % image_href
# We now have a valid imageId
server[self._image_ref_parameter] = image_href
# Set a valid flavorId
flavor = self.api.get_flavors()[1]
server[self._flavor_ref_parameter] = ('http://fake.server/%s'
% flavor['id'])
server['name'] = name
return server
def test_security_group_fuzz(self):
"""Test security group doesn't explode with a 500 on bad input.
Originally reported with bug
https://bugs.launchpad.net/nova/+bug/1239723
"""
server = self._build_minimal_create_server_request("sg-fuzz")
# security groups must be passed as a list, this is an invalid
# format. The jsonschema in v2.1 caught it automatically, but
# in v2 we used to throw a 500.
server['security_groups'] = {"name": "sec"}
resp = self.api.api_post('/servers', {'server': server},
check_response_status=False)
self.assertEqual(400, resp.status)
|
avikivity/scylla | dist/docker/redhat/commandlineparser.py | Python | agpl-3.0 | 2,930 | 0.006826 | import argparse
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--developer-mode', default='1', choices=['0', '1'], dest='developerMode')
parser.add_argument('--experimental', default=0, choices=['0', '1'])
parser.add_argument('--seeds', default=None, help="specify seeds - if left empty will use container's own IP")
parser.add_argument('--cpuset', default=None, help="e.g. --cpuset 0-3 for the first four CPUs")
parser.add_argument('--smp', default=None, help="e.g --smp 2 to use two CPUs")
parser.add_argument('--memory', default=None, help="e.g. --memory 1G to use 1 GB of RAM")
parser.add_argument('--reserve-memory', default=None, dest='reserveMemory', help="e.g. --reserve-memory 1G to reserve 1 GB of RAM")
parser.add_argument('--overprovisioned', default=None, choices=['0', '1'],
help="run in overprovisioned environment. By default it will run in overprovisioned mode unless --cpuset is specified")
parser.add_argument('--io-setup', def | ault='1', choices=['0', '1'], dest='io_setup', help='Run I/O setup (i.e. iotune) at container startup. Defaults to 1.')
parser.add_argument('--listen-address', default=None, dest='listenAddress')
parser.add_argu | ment('--rpc-address', default=None, dest='rpcAddress')
parser.add_argument('--broadcast-address', default=None, dest='broadcastAddress')
parser.add_argument('--broadcast-rpc-address', default=None, dest='broadcastRpcAddress')
parser.add_argument('--api-address', default=None, dest='apiAddress')
parser.add_argument('--alternator-address', default=None, dest='alternatorAddress', help="Alternator API address to listen to. Defaults to listen address.")
parser.add_argument('--alternator-port', default=None, dest='alternatorPort', help="Alternator API port to listen to. Disabled by default.")
parser.add_argument('--alternator-https-port', default=None, dest='alternatorHttpsPort', help="Alternator API TLS port to listen to. Disabled by default.")
parser.add_argument('--alternator-write-isolation', default=None, dest='alternatorWriteIsolation', help="Alternator default write isolation policy.")
parser.add_argument('--disable-version-check', default=False, action='store_true', dest='disable_housekeeping', help="Disable version check")
parser.add_argument('--authenticator', default=None, dest='authenticator', help="Set authenticator class")
parser.add_argument('--authorizer', default=None, dest='authorizer', help="Set authorizer class")
parser.add_argument('--cluster-name', default=None, dest='clusterName', help="Set cluster name")
parser.add_argument('--endpoint-snitch', default=None, dest='endpointSnitch', help="Set endpoint snitch")
parser.add_argument('--replace-address-first-boot', default=None, dest='replaceAddressFirstBoot', help="IP address of a dead node to replace.")
return parser.parse_args()
|
bitmaintech/p2pool | p2pool/bitcoin/data.py | Python | gpl-3.0 | 10,333 | 0.007936 | from __future__ import division
import hashlib
import random
import warnings
import p2pool
from p2pool.util import math, pack
import struct
def hash256(data):
return pack.IntType(256).unpack(hashlib.sha256(hashlib.sha256(data).digest()).digest())
def hash160(data):
if data == '04ffd03de44a6e11b9917f3a29f9443283d9871c9d743ef30d5eddcd37094b64d1b3d8090496b53256786bf5c82932ec23c3b74d9f05a6f95a8b5529352656664b'.decode('hex'):
return 0x384f570ccc88ac2e7e00b026d1690a3fca63dd0 # hack for people who don't have openssl - this is the only value that p2pool ever hashes
return pack.IntType(160).unpack(hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest())
def Bits2Target(bits):
return struct.unpack('<L', bits[:3] + b'\0')[0] * 2**(8*(bits[3] - 3))
class ChecksummedType(pack.Type):
def __init__(self, inner, checksum_func=lambda data: hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]):
self.inner = inner
self.checksum_func = checksum_func
def read(self, file):
obj, file = self.inner.read(file)
data = self.inner.pack(obj)
calculated_checksum = self.checksum_func(data)
checksum, file = pack.read(file, len(calculated_checksum))
if checksum != calculated_checksum:
raise ValueError('invalid checksum')
return obj, file
def write(self, file, item):
data = self.inner.pack(item)
return (file, data), self.checksum_func(data)
class FloatingInteger(object):
__slots__ = ['bits', '_target']
@classmethod
def from_target_upper_bound(cls, target):
n = math.natural_to_string(target)
if n and ord(n[0]) >= 128:
n = '\x00' + n
bits2 = (chr(len(n)) + (n + 3*chr(0))[:3])[::-1]
bits = pack.IntType(32).unpack(bits2)
return cls(bits)
def __init__(self, bits, target=None):
self.bits = bits
self._target = None
if target is not None and self.target != target:
raise ValueError('target does not match')
@property
def target(self):
res = self._target
if res is None:
res = self._target = math.shift_left(self.bits & 0x00ffffff, 8 * ((self.bits >> 24) - 3))
return res
def __hash__(self):
return hash(self.bits)
def __eq__(self, other):
return self.bits == other.bits
def __ne__(self, other):
return not (self == other)
def __cmp__(self, other):
assert False
def __repr__(self):
return 'FloatingInteger(bits=%s, target=%s)' % (hex(self.bits), hex(self.target))
class FloatingIntegerType(pack.Type):
_inner = pack.IntType(32)
def read(self, file):
bits, file = self._inner.read(file)
return FloatingInteger(bits), file
def write(self, file, item):
return self._inner.write(file, item.bits)
address_type = pack.ComposedType([
('services', pack.IntType(64)),
('address', pack.IPV6AddressType()),
('port', pack.IntType(16, 'big')),
])
tx_type = pack.ComposedType([
('version', pack.IntType(32)),
('tx_ins', pack.ListType(pack.ComposedType([
('previous_output', pack.PossiblyNoneType(dict(hash=0, index=2**32 - 1), pack.ComposedType([
('hash', pack.IntType(256)),
('index', pack.IntType(32)),
]))),
('script', pack.VarStrType()),
('sequence', pack.PossiblyNoneType(2**32 - 1, pack.IntType(32))),
]))),
('tx_outs', pack.ListType(pack.ComposedType([
('value', pack.IntType(64)),
('script', pack.VarStrType()),
]))),
('lock_time', pack.IntType(32)),
])
merkle_link_type = pack.ComposedType([
('branch', pack.ListType(pack.IntType(256))),
('index', pack.IntType(32)),
])
merkle_tx_type = pack.ComposedType([
('tx', tx_type),
('block_hash', pack.IntType(256)),
('merkle_link', merkle_link_type),
])
block_header_type = pack.ComposedType([
('version', pack.IntType(32)),
('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
('merkle_root', pack.IntType(256)),
('timestamp', pack.IntType(32)),
('bits', FloatingIntegerType()),
('nonce', pack.IntType(32)),
])
block_type = pack.ComposedType([
('header', block_header_type),
('txs', pack.ListType(tx_type)),
])
# merged mining
aux_pow_type = pack.ComposedType([
('merkle_tx', merkle_tx_type),
('merkle_link', merkle_link_type),
('parent_block_header', block_header_type),
])
aux_pow_coinbase_type = pack.ComposedType([
('merkle_root', pack.IntType(256, 'big')),
('size', pack.IntType(32)),
('nonce', pack.IntType(32)),
])
def make_auxpow_tree(chain_ids):
for size in (2**i for i in xrange(31)):
if size < len(chain_ids):
continue
res = {}
for chain_id in chain_ids:
pos = (1103515245 * chain_id + 1103515245 * 12345 + 12345) % size
if pos in res:
break
res[pos] = chain_id
else:
return res, size
raise AssertionError()
# merkle trees
merkle_record_type = pack.ComposedType([
('left', pack.IntType(256)),
('right', pack.IntType(256)),
])
def merkle_hash(hashes):
if not hashes:
return 0
hash_list = list(hashes)
while len(hash_list) > 1:
hash_list = [hash256(merkle_record_type.pack(dict(left=lef | t, right=right)))
for left, right in zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]])]
return hash_list[0]
def calculate_merkle_link(hashes, index):
# XXX optimize this
hash_list = [(lambda _h=h: _h, i == index, []) for i, h in enumerate(hashes)]
while len(hash_list) > 1:
hash_list = [
(
lambda _left=left, _right=right: hash256(merkle_record_type.pack(dict(l | eft=_left(), right=_right()))),
left_f or right_f,
(left_l if left_f else right_l) + [dict(side=1, hash=right) if left_f else dict(side=0, hash=left)],
)
for (left, left_f, left_l), (right, right_f, right_l) in
zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]])
]
res = [x['hash']() for x in hash_list[0][2]]
assert hash_list[0][1]
if p2pool.DEBUG:
new_hashes = [random.randrange(2**256) if x is None else x
for x in hashes]
assert check_merkle_link(new_hashes[index], dict(branch=res, index=index)) == merkle_hash(new_hashes)
assert index == sum(k*2**i for i, k in enumerate([1-x['side'] for x in hash_list[0][2]]))
return dict(branch=res, index=index)
def check_merkle_link(tip_hash, link):
if link['index'] >= 2**len(link['branch']):
raise ValueError('index too large')
return reduce(lambda c, (i, h): hash256(merkle_record_type.pack(
dict(left=h, right=c) if (link['index'] >> i) & 1 else
dict(left=c, right=h)
)), enumerate(link['branch']), tip_hash)
# targets
def target_to_average_attempts(target):
assert 0 <= target and isinstance(target, (int, long)), target
if target >= 2**256: warnings.warn('target >= 2**256!')
return 2**256//(target + 1)
def average_attempts_to_target(average_attempts):
assert average_attempts > 0
return min(int(2**256/average_attempts - 1 + 0.5), 2**256-1)
def target_to_difficulty(target):
assert 0 <= target and isinstance(target, (int, long)), target
if target >= 2**256: warnings.warn('target >= 2**256!')
return (0xffff0000 * 2**(256-64) + 1)/(target + 1)
def difficulty_to_target(difficulty):
assert difficulty >= 0
if difficulty == 0: return 2**256-1
return min(int((0xffff0000 * 2**(256-64) + 1)/difficulty - 1 + 0.5), 2**256-1)
# human addresses
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def base58_encode(bindata):
bindata2 = bindata.lstrip(chr(0))
return base58_alphabet[0]*(len(bindata) - len(bindata2)) + math.natural_to_string(math.string_to_natural(bindata2), base58_alphabet)
def base58_decode(b58data):
b58data2 = b58data.lstrip(base58_al |
askervin/gdbsearch | setup.py | Python | gpl-3.0 | 377 | 0.002653 | #!/ | usr/bin/env python
from distutils.core import setup
setup(name='gdbsearch',
version='0.1',
description='Search for interesting lines of code',
author='Antti Kervinen',
author_email='antti.kervinen@gmail.com',
url='http://www.github.com/antti | .kervinen/gdbsearch',
license='GNU General Public License',
scripts=['gdbsearch']
)
|
sgarrity/bedrock | tests/pages/firefox/channel/ios.py | Python | mpl-2.0 | 590 | 0 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of | the MPL was not distributed with this
# | file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.firefox.base import FirefoxBasePage
class ChannelIOSPage(FirefoxBasePage):
URL_TEMPLATE = '/{locale}/firefox/channel/ios/'
_testflight_button_locator = (By.CLASS_NAME, 'testflight-cta')
@property
def is_testflight_button_displayed(self):
return self.is_element_displayed(*self._testflight_button_locator)
|
rajashreer7/autotest-client-tests | linux-tools/prelink/prelink.py | Python | gpl-2.0 | 1,662 | 0.004813 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class prelink(test.test):
"""
Autotest module for testing basic functionality
of prelink
@author Athira Rajeev <atrajeev@in.ibm.com>
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
for package in ['gcc', 'gcc-c++']:
if not sm.check_installed(package):
logging.debug("%s missing - trying to install", package)
sm.install(package)
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/prelink" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess. | Popen(['./prelink.sh'], cwd="%s/prelink" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is | non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
voytekresearch/fakespikes | fakespikes/neurons.py | Python | mit | 4,820 | 0.001245 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.random import RandomState
from fakespikes.util import to_spikes
from fakespikes.rates import constant
class Spikes(object):
"""Simulates statistical models of neural spiking
Params
------
n : int
Number of neurons
t : float
Simulation time (seconds)
dt : float
Time-step (seconds)
refractory : float
Absolute refractory time
seed : None, int, RandomState
The random seed
private_stdev : float
Amount of stdev noise to add to each neurons tuning respose
"""
def __init__(self, n, t, dt=0.001, refractory=0.002, seed=None,
private_stdev=0):
# Ensure reproducible randomess
self.seed = seed
if isinstance(seed, RandomState):
self.prng = seed
elif self.seed is not None:
self.prng = np.random.RandomState(seed)
else:
self.prng = np.random.RandomState()
# Init constraints
if n < 2:
raise ValueError("n must be greater than 2")
if dt > 0.001:
raise ValueError("dt must be less than 0.001 seconds (1 ms)")
if not np.allclose(refractory / dt, int(refractory / dt)):
raise ValueError("refractory must be integer multiple of dt")
self.n = n
self.refractory = refractory
# Timing
self.dt = dt
self.t = t
self.n_steps = int(self.t * (1.0 / self.dt))
self.times = np.linspace(0, self.t, self.n_steps)
self.private_stdev = private_stdev
self.refractory = refractory
# Create uniform sampling distributions for each neuron
self.unifs = np.vstack(
[self.prng.uniform(0, 1, self.n_steps) for i in range(self.n)]
).transpose()
def _constraints(self, drive):
if drive.shape != self.times.shape:
raise ValueError("Shape of `drive` didn't match times")
if drive.ndim != 1:
raise ValueError("`drive` must be 1d")
def _refractory(self, spks):
lw = int(self.refractory / self.dt) # len of refractory window
# If it spiked at t, delete spikes
# in the refractory window
for t in range(spks.shape[0]):
mask = spks[t, :]
for t_plus in range(lw):
spks[t_plus, :][mask] = 0
return spks
def poisson(self, rates):
"""Simulate Poisson firing
Params
------
rates : array-like, 1d, > 0
The firing rate
"""
self._constraints(rates) # does no harm to check twice
# No bias unless private_stdev is specified
biases = np.zeros(self.n)
if self.private_stdev > 0:
biases = self.prng.normal(0, self.private_stdev, size=self.n)
# Poisson method taken from
# http://www.cns.nyu.edu/~david/handouts/poisson.pdf
spikes = np.zeros_like(self.unifs, np.int)
for j in range(self.n):
mask = self.unifs[:, j] <= ((rates + biases[j]) * self.dt)
spikes[mask, j] = 1
return self._refractory(spikes)
def sync_bursts(self, a0, f, k, var=1e-3):
"""Create synchronous bursts (1 ms variance) of thalamic-ish spike
Params
------
f : numeric
Oscillation frequency (Hz)
k : numeric
Number of neuron to spike at a time
"""
if k > self.n:
raise ValueError("k is larger than N")
if f < 0:
raise ValueError("f must be greater then 0")
if k < 0:
raise ValueError("k must be greater then 0")
# Locate about where the pulses of spikes will go, at f,
wl = 1 / float(f)
n_pulses = int(self.t * f)
pulses = []
t_p = 0
for _ in range(n_pulses):
t_p += wl
# Gaurd against negative ts
if t_p > (3 * var):
pulses.append(t_p)
# and fill in the pulses with Gaussin distributed spikes.
Ns = range(self.n)
ts = []
ns = []
for t in pulses:
ts += list(t + self.prng.normal(0, var, k))
# Assign spikes to random neurons, at most
# one spike / neuron
self.prng.shuffle(Ns)
ns += list(Ns)[0:k]
ts = np.array(ts)
ns = np.array(ns)
# Just in case any negative time any slipped trough
| mask = ts > 0
ts = ts[mask]
ns = ns[mask]
spikes = to_spikes(ns, ts, self.t, self.n, self.dt)
# Create baseline firing
base = self.poisson(constant(self.times, a0))
spikes = base + spikes
spikes[spikes > 1] = 1
return spikes
| |
Captain-Coder/tribler | TriblerGUI/widgets/orderwidgetitem.py | Python | lgpl-3.0 | 1,786 | 0.003919 | import datetime
from PyQt5.QtWidgets import QTreeWidgetItem
from TriblerGUI.utilities import prec_div
class OrderWidgetItem(QTreeWidgetItem):
"""
This class represents a widget that displays an order.
"""
def __init__(self, parent, order, asset1_prec, asset2_prec):
QTreeWidgetItem.__init__(self, parent)
| self.order = order
order_time = datetime.datetime.fromtimestamp(int(order["timestamp"])).strftime('%Y-%m-%d %H:%M:%S')
self.to | tal_volume = prec_div(order["assets"]["first"]["amount"], asset1_prec)
self.traded_volume = prec_div(order["traded"], asset1_prec)
self.price = float(self.total_volume) / float(prec_div(order["assets"]["second"]["amount"], asset2_prec))
self.setText(0, "%s" % order["order_number"])
self.setText(1, order_time)
self.setText(2, "%g %s" % (self.price, order["assets"]["second"]["type"]))
self.setText(3, "%g %s" % (self.total_volume, order["assets"]["first"]["type"]))
self.setText(4, "%g %s" % (self.traded_volume, order["assets"]["first"]["type"]))
self.setText(5, "Sell" if order["is_ask"] else "Buy")
self.setText(6, "%s" % order["status"])
def __lt__(self, other):
column = self.treeWidget().sortColumn()
if column == 0:
return int(self.order["order_number"]) > int(other.order["order_number"])
if column == 1:
return int(self.order["timestamp"]) > int(other.order["timestamp"])
elif column == 2:
return self.price > other.price
elif column == 3:
return self.total_volume > other.total_volume
elif column == 4:
return self.traded_volume > other.traded_volume
return self.text(column) > other.text(column)
|
tchellomello/home-assistant | homeassistant/components/rfxtrx/cover.py | Python | apache-2.0 | 3,963 | 0.000252 | """Support for RFXtrx covers."""
import logging
from homeassistant.components.cover import CoverEntity
from homeassistant.const import CONF_DEVICES, STATE_OPEN
from homeassistant.core import callback
from . import (
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_SIGNAL_REPETITIONS,
DEFAULT_SIGNAL_REPETITIONS,
SIGNAL_EVENT,
RfxtrxCommandEntity,
get_device_id,
get_rfx_object,
)
from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up config entry."""
discovery_info = config_entry.data
device_ids = set()
def supported(event):
return event.device.known_to_be_rollershutter
entities = []
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("I | nvalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
if device | _id in device_ids:
continue
device_ids.add(device_id)
entity = RfxtrxCover(
event.device, device_id, entity_info[CONF_SIGNAL_REPETITIONS]
)
entities.append(entity)
async_add_entities(entities)
@callback
def cover_update(event, device_id):
"""Handle cover updates from the RFXtrx gateway."""
if not supported(event):
return
if device_id in device_ids:
return
device_ids.add(device_id)
_LOGGER.info(
"Added cover (Device ID: %s Class: %s Sub: %s, Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
entity = RfxtrxCover(
event.device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event
)
async_add_entities([entity])
# Subscribe to main RFXtrx events
if discovery_info[CONF_AUTOMATIC_ADD]:
hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, cover_update)
class RfxtrxCover(RfxtrxCommandEntity, CoverEntity):
"""Representation of a RFXtrx cover."""
async def async_added_to_hass(self):
"""Restore device state."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_OPEN
@property
def is_closed(self):
"""Return if the cover is closed."""
return not self._state
async def async_open_cover(self, **kwargs):
"""Move the cover up."""
await self._async_send(self._device.send_open)
self._state = True
self.async_write_ha_state()
async def async_close_cover(self, **kwargs):
"""Move the cover down."""
await self._async_send(self._device.send_close)
self._state = False
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self._async_send(self._device.send_stop)
self._state = True
self.async_write_ha_state()
def _apply_event(self, event):
"""Apply command from rfxtrx."""
super()._apply_event(event)
if event.values["Command"] in COMMAND_ON_LIST:
self._state = True
elif event.values["Command"] in COMMAND_OFF_LIST:
self._state = False
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
self._apply_event(event)
self.async_write_ha_state()
|
Petr-Kovalev/nupic-win32 | py/regions/ImageSensorExplorers/ExhaustiveSweep.py | Python | gpl-3.0 | 11,962 | 0.007691 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
from nupic.regions.ImageSensorExplorers.BaseExplorer import BaseExplorer
class ExhaustiveSweep(BaseExplorer):
"""
This explorer performs an exhaustive raster scan through the input space.
By default, it iterates through images, filters, and sweep positions, with
sweep positions as the inner loop.
"""
def __init__(self, sweepDirections=["right", "down"], shiftDuringSweep=1,
shiftBetweenSweeps=1, sweepOffObject=False, order=None, *args, **kwargs):
"""
sweepDirections -- Directions for sweeping (a list containing one or
more of 'left', 'right', 'up', and 'down').
shiftDuringSweep -- Number of pixels to jump with each step (during a
sweep).
shiftBetweenSweeps -- Number of pixels to jump in between sweeps
(for example, when moving down a line after sweeping across).
sweepOffObject -- Whether the sensor can only include a part of the
object, as specified by the bounding box. If False, it will only move to
positions that include as much of the object as possible. If True, it
will sweep until all of the object moves off the sensor. If set to a floating
point number between 0 and 1, then it will sweep until that fraction of the
object moves off the sensor.
order -- Order in which to iterate (outer to inner). Default progresses
through switching images, filters, and sweeping, where switching images
is the outer loop and sweeping is the inner loop. Should be a list
containing 'image', 'sweep', and 0, 1, ... numFilters-1.
"""
BaseExplorer.__init__(self, *args, **kwargs)
for direction in sweepDirections:
if direction not in ('left', 'right', 'up', 'down'):
raise RuntimeError("Unknown sweep direction: '%s'" % direction)
if type(shiftDuringSweep) is not int:
raise RuntimeError("'shiftDuringSweep' must be an integer")
if type(shiftBetweenSweeps) is not int:
raise RuntimeError("'shiftBetweenSweeps' must be an integer")
if float(sweepOffObject) < 0 or float(sweepOffObject) > 1.0:
raise RuntimeError("'sweepOffObject' should be a boolean, or floating point"
" number between 0 and 1")
if order is not None:
if 'image' not in order or 'sweep' not in order:
raise RuntimeError("'order' must contain both 'image' and 'sweep'")
if len([x for x in order if type(x) == str]) > 2:
raise RuntimeError("'order' must contain no other strings besides "
"'image' and 'sweep'")
self.customOrder = True
else:
self.customOrder = False
self.sweepDirections = sweepDirections
self.shiftDuringSweep = shiftDuringSweep
self.shiftBetweenSweeps = shiftBetweenSweeps
self.sweepOffObject = sweepOffObject
self.order = order
def first(self):
"""
Set up the position.
BaseExplorer picks image 0, offset (0,0), etc., but explorers that wish
to set a different first position should extend this method. Such explorers
may wish to call BaseExplorer.first(center=False), which initializes the
position tuple but does not call centerImage() (which could cause
unnecessary filtering to occur).
"""
BaseExplorer.first(self)
self.directionIndex = 0
if self.numImages:
self._firstSweepPosition()
def next(self, seeking=False):
"""
Go to the next position (next iteration).
seeking -- Boolean that indicates whether the explorer is calling next()
from seek(). If True, the explorer should avoid unnecessary computation
that would not affect the seek command. The last call to next() from
seek() will be with seeking=False.
"""
BaseExplorer.next(self)
# If filters were changed, order may be invalid
if self.order is None or \
len([x for x in self.order if type(x) == int]) != self.numFilters:
# If user did not set a custom order, just create new one automatically
if not self.customOrder:
self.order = ["image"]
self.order.extend(range(self.numFilters))
self.order += ["sweep"]
# Otherwise, user needs to recreate the explorer with a new order
else:
raise RuntimeError("'order' is invalid. Must recreate explorer with "
"valid order after changing filters.")
if self.position['reset'] and self.blankWithReset:
# Last iteration was a blank, so don't increment the position
self.position['reset'] = False
else:
self.position['reset'] = False
for x in reversed(self.order):
if x == 'image': # Iterate the image
self.position['image'] += 1
if self.position['image'] == self.numImages:
self.position['image'] = 0
self.position['reset'] = True
else:
break
elif x == 'sweep': # Iterate the sweep position
nextImage = self._nextSweepPosition()
if not nextImage:
break
else: # Iterate the filter with index x
self.position['filters'][x] += 1
if self.position['filters'][x] == self.numFilterOutputs[x]:
self.position['filters'][x] = 0
self.position['reset'] = True
else:
break
if nextImage:
self._firstSweepPosition()
def getNumIterations(self, image):
"""
Get the number of iterations required to completely explore the input space.
Explorers that do not wish to support this method should not override it.
image -- If None, returns the sum of the iterations for all the loaded
images. Otherwise, image should be an integer specifying the image for
which to calculate iterations.
| ImageSensor takes care of the input validation.
"""
if image is None:
filteredImages = []
for i in xrange(self.numImages):
filteredImages.extend(self.getAllFilteredVersionsOfImage(i))
else:
filteredImages = self.getAll | FilteredVersionsOfImage(image)
return sum([self._getNumIterationsForImage(x[0]) for x in filteredImages])
def _firstSweepPosition(self):
"""
Go to the first sweep position for the current image and sweep direction.
"""
sbbox = self._getSweepBoundingBox(self.getFilteredImages()[0])
direction = self.sweepDirections[self.directionIndex]
if direction in ('right', 'down'):
self.position['offset'][0] = sbbox[0]
self.position['offset'][1] = sbbox[1]
elif direction == 'left':
self.position['offset'][0] = sbbox[2] - 1
self.position['offset'][1] = sbbox[1]
elif direction == 'up':
self.position['offset'][0] = sbbox[0]
self.position['offset'][1] = sbbox[3] - 1
def _nextSweepPosition(self):
"""
Increment the sweep position.
Return True (nextImage) if we exhausted all sweeps.
"""
sbbox = self._getSweepBoundingBox(self.getFilteredImages()[0])
direction = self.sweepDirections[self.directionIndex]
nextDirection = False
if direction == 'right':
self.position['offset'][0] += self.shiftDuringSweep
if self.position['offset'][0] >= sbbox[2]:
self.position['reset'] = True
self.position['offset'][0] = sbbox |
associatedpress/datakit-data | tests/commands/test_push.py | Python | isc | 1,702 | 0 | from unittest import mock
import pytest
from conftest import create_project_config
from datakit_data import Push
@pytest.fixture(autouse=True)
def initialize_data_configs(dkit_home, fake_project):
project_configs = {
's3_bucket': 'foo.org',
's3_path': '2017/fake-project',
'aws_user_profile': 'ap'
}
create_project_config(fake_project, project_configs)
def test_s3_instantiation(mocker):
"""
S3 wrapper instantiated properly
"""
s3_mock = mocker.patch(
'datakit_data.commands.push.S3',
autospec=True,
)
cmd = Push(None, None, 'data push')
parsed_args = mock.Mock()
parsed_args.args = []
cmd.run(parsed_args)
# S3 instantiated with project-level configs for
# user profile and bucket
s3_mock.assert_called_once_with('ap', 'foo.org')
def test_push_invocation(mocker):
"""
S3.push invoked with default data dir and s3 path
"""
push_mock = mocker.patch(
'datakit_data.commands.push.S3.push',
autospec=True,
)
cmd = Push(None, None, 'data push')
parsed_args = mock.Mock()
parsed_args.args = []
cmd.run(parsed_args)
push_mock.assert_any_call(
mock.ANY,
'data/',
'2017/fake-project',
extra_flags=[]
| )
def test_boolean_cli_flags(mocker):
push_mock = mocker.patch(
'datakit_data. | commands.push.S3.push',
autospec=True,
)
parsed_args = mock.Mock()
parsed_args.args = ['dry-run']
cmd = Push(None, None, 'data push')
cmd.run(parsed_args)
push_mock.assert_any_call(
mock.ANY,
'data/',
'2017/fake-project',
extra_flags=['--dry-run']
)
|
SpectraLogic/samba | selftest/tests.py | Python | gpl-3.0 | 4,687 | 0.00064 | #!/usr/bin/python
# This script generates a list of testsuites that should be run as part of
# the Samba test suite.
# The output of this script is parsed by selftest.pl, which then decides
# which of the tests to actually run. It will, for example, skip all tests
# listed in selftest/skip or only run a subset during "make quicktest".
# The idea is that this script outputs all of the tests of Samba, not
# just those that are known to pass, and list those that should be skipped
# or are known to fail in selftest/skip or selftest/knownfail. This makes it
# very easy to see what functionality is still missing in Samba and makes
# it possible to run the testsuite against other servers, such as
# Windows that have a different set of features.
# The syntax for a testsuite is "-- TEST --" on a single line, followed
# by the name of the test, the environment it needs and the command to run, all
# three separated by newlines. All other lines in the output are considered
# comments.
from selftesthelpers import *
try:
config_h = os.environ["CONFIG_H"]
except KeyError:
config_h = os.path.join(samba4bindir, "default/include/config.h")
# define here var to check what we support
f = open(config_h, 'r')
try:
have_man_pages_support = ("XSLTPROC_MANPAGES 1" in f.read())
finally:
f.close()
planpythontestsuite("none", "samba.tests.source")
if have_man_pages_support:
planpythontestsuite("none", "samba.tests.docs")
planpythontestsuite("none", "selftest.tests.test_suite", extra_path=[srcdir()])
try:
import testscenarios
except ImportError:
skiptestsuite("subunit", "testscenarios not available")
else:
planpythontestsuite("none", "subunit.tests.test_suite")
planpythontestsuite("none", "samba.tests.blackbox.ndrdump")
planpythontestsuite("none", "api", name="ldb.python", extra_path=['lib/ldb/tests/python'])
planpythontestsuite("none", "samba.tests.credentials")
planpy | thontestsuite("none", "samba.tests.registry")
planpythontestsuite("none", "samba.tests.auth")
planpythontestsuite("none", "samba.tests.getopt")
planpythontestsuite("none", "samba.tests.secur | ity")
planpythontestsuite("none", "samba.tests.dcerpc.misc")
planpythontestsuite("none", "samba.tests.param")
planpythontestsuite("none", "samba.tests.upgrade")
planpythontestsuite("none", "samba.tests.core")
planpythontestsuite("none", "samba.tests.provision")
planpythontestsuite("none", "samba.tests.samba3")
planpythontestsuite("none", "samba.tests.strings")
planpythontestsuite("none", "samba.tests.netcmd")
planpythontestsuite("none", "samba.tests.dcerpc.rpc_talloc")
planpythontestsuite("none", "samba.tests.hostconfig")
planpythontestsuite("ad_dc_ntvfs:local", "samba.tests.messaging")
planpythontestsuite("none", "samba.tests.samba3sam")
planpythontestsuite(
"none", "wafsamba.tests.test_suite",
extra_path=[os.path.join(samba4srcdir, "..", "buildtools"),
os.path.join(samba4srcdir, "..", "third_party", "waf", "wafadmin")])
plantestsuite(
"samba4.blackbox.dbcheck.alpha13", "none",
["PYTHON=%s" % python, os.path.join(bbdir, "dbcheck-oldrelease.sh"),
'$PREFIX_ABS/provision', 'alpha13', configuration])
plantestsuite(
"samba4.blackbox.dbcheck.release-4-0-0", "none",
["PYTHON=%s" % python, os.path.join(bbdir, "dbcheck-oldrelease.sh"),
'$PREFIX_ABS/provision', 'release-4-0-0', configuration])
plantestsuite(
"samba4.blackbox.dbcheck.release-4-1-0rc3", "none",
["PYTHON=%s" % python, os.path.join(bbdir, "dbcheck-oldrelease.sh"),
'$PREFIX_ABS/provision', 'release-4-1-0rc3', configuration])
plantestsuite(
"samba4.blackbox.dbcheck.release-4-1-6-partial-object", "none",
["PYTHON=%s" % python, os.path.join(bbdir, "dbcheck-oldrelease.sh"),
'$PREFIX_ABS/provision', 'release-4-1-6-partial-object', configuration])
plantestsuite(
"samba4.blackbox.upgradeprovision.alpha13", "none",
["PYTHON=%s" % python,
os.path.join(bbdir, "upgradeprovision-oldrelease.sh"),
'$PREFIX_ABS/provision', 'alpha13', configuration])
plantestsuite(
"samba4.blackbox.upgradeprovision.release-4-0-0", "none",
["PYTHON=%s" % python,
os.path.join(bbdir, "upgradeprovision-oldrelease.sh"),
'$PREFIX_ABS/provision', 'release-4-0-0', configuration])
planpythontestsuite("none", "samba.tests.upgradeprovision")
planpythontestsuite("none", "samba.tests.xattr")
planpythontestsuite("none", "samba.tests.ntacls")
planpythontestsuite("none", "samba.tests.policy")
planpythontestsuite("none", "samba.tests.graph_utils")
planpythontestsuite("none", "samba.tests.ldif_utils")
plantestsuite("wafsamba.duplicate_symbols", "none", [os.path.join(srcdir(), "buildtools/wafsamba/test_duplicate_symbol.sh")])
|
mrunge/openstack_horizon | openstack_horizon/dashboards/project/routers/ports/urls.py | Python | apache-2.0 | 949 | 0 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
| #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is | distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_horizon.dashboards.project.routers.ports import views
PORTS = r'^(?P<port_id>[^/]+)/%s$'
urlpatterns = patterns(
'horizon.dashboards.project.networks.ports.views',
url(PORTS % 'detail', views.DetailView.as_view(), name='detail'))
|
tencentyun/python-sdk | python2/tencentyun/imageprocess.py | Python | mit | 3,116 | 0.013479 | # -*- coding: utf-8 -*-
import os.path
import time
import urllib
import json
import requests
from tencentyun import conf
from .auth import Auth
class ImageProcess(object):
def __init__(self, appid, secret_id, secret_key, bucket):
self.IMAGE_FILE_NOT_EXISTS = -1
self._secret_id,self._secret_key = secret_id,secret_key
conf.set_app_info(appid, secret_id, secret_key, bucket)
def porn_detect(self, porn_detect_url):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign(porn_detect_url)
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
payload = {
'bucket':app_info['bucket'],
'appid':int(app_info['appid']),
'url':(porn_detect_url).encode("utf-8"),
}
header = {
'Authorization':sign,
'Content-Type':'application/json',
}
r = {}
r = requests.post(url, data=json.dumps(payload), headers=header)
ret = r.json()
return ret
def porn_detect_url(self, porn_url):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign()
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
payload = {
'bucket':app_info['bucket'],
'appid':int(app_info['appid']),
'url_list':porn_url,
}
header = {
'Authorization':sign,
'Content-Type':'application/json',
}
r = {}
r = requests.post(url, data=json.dumps(payload), headers=header)
ret = r.json()
return ret
def porn_detect_file(self, porn_file):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign()
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
header = {
'Authorization':sign,
}
files = {
'appid':(None,app_info['appid'],None),
'bucket':(None,app_info['bucket'],None),
}
i=0
for pfile in porn_file:
pfile = pfile.decode('utf-8')
local_path = os.path.abspath(pfile)
if not os.path.exists(local_path):
return {'httpcode':0, 'code':self.IMAGE_FILE_NOT_EXISTS, 'message':'fi | le ' + pfile + ' not exists', 'data':{}}
i+=1
files['image['+str(i-1)+']']=(pfile, open(pfile,'r | b'))
r = requests.post(url, headers=header, files=files)
ret = r.json()
return ret
|
struqt/invar | invar-example/target/generated-sources/example/python/TestDbMemberEntry.py | Python | mit | 4,050 | 0.00669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===------------------------------* Python *------------------------------===
# THIS FILE IS GENERATED BY INVAR. DO NOT EDIT !!!
# ===------------------------------------------------------------------------===
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from InvarCodec import DataWriter
from InvarCodec import DataReader
class MemberEntry(object):
"""名字冲突的类型"""
CRC32_ = 0x00240151
SIZE_ = 29
__slots__ = (
'_id',
'_phone',
'_nickName',
'_createTime',
'_updateTime',
'_hotfix')
#__slots__
def __init__(self):
self._id = 0
self._phone = ''
self._nickName = ''
self._createTime = -1
self._updateTime = -1
self._hotfix = None
#def __init__
def __str__(self):
s = StringIO()
s.write(u'{')
s.write(u' ')
s.write(u'MemberEntry')
s.write(u',')
s.write(u' ')
s.write(u'id')
s.write(u':')
s.write(unicode(self._id))
s.write(u',')
s.write(u' ')
s.write(u'phone')
s.write(u':')
s.write(u'"')
s.write(self._phone)
s.write(u'"')
s.write(u',')
s.write(u' ')
s.write(u'nickName')
s.write(u':')
s.write(u'"')
s.write(self._nickName)
s.write(u'"')
s.write(u',')
s.write(u' ')
s.write(u'createTime')
s.write(u':')
s.write(unicode(self._createTime))
s.write(u',')
s.write(u' ')
s.write(u'updateTime')
s.write(u':')
s.write(unicode(self._updateTime))
s.write(u',')
s.write(u' ')
s.write(u'hotfix')
s.write(u':')
if self._hotfix is None:
s.write(u'null')
else:
s.write(u'[')
s.write(str(len(self._hotfix)))
s.write(u']')
s.write(u' ')
s.write(u'}')
result = s.getvalue()
s.close()
return result
#def __str__
def __len__(self):
size = MemberEntry.SIZE_
size += len(self._phone)
size += len(self._nickName)
if self._hotfix is not None:
size += 4
for (k1,v1) in self._hotfix.items():
size += len(k1)
size += len(v1)
return size
#def __len__
def read(r):
self._id = r.readUInt32()
self._phone = r.readString()
self._nickName = r.readString()
self._createTime = r.readInt64()
self._updateTime = r.readInt64()
hotfixExists = r.readInt8()
if 0x01 == hotfixExists:
if self._hotfix == None:
self._hotfix = dict()
lenHotfix = r.readUInt32()
num = 0
while num < lenHotfix:
num += 1
k1 = r.readString()
v1 = r.readString()
self._hotfix[k1] = v1
elif 0x00 == hotfixExists:
self._hotfix = None
else:
| raise InvarError(498, 'Protoc read error: The value of \'hotfixExists\' is invalid.')
#def read
def write(w):
w.writeUInt32(self._id)
w.writeString(self._phone)
w.writeString(se | lf._nickName)
w.writeInt64(self._createTime)
w.writeInt64(self._updateTime)
if self._hotfix != None:
w.writeUInt8(0x01)
w.writeUInt32(len(self._hotfix))
for (k1,v1) in self._hotfix.items():
w.writeString(k1)
w.writeString(v1)
else:
w.writeUInt8(0x00)
#def write
#class MemberEntry
if '__main__' == __name__:
print('dir(MemberEntry()) =>\n' + '\n'.join(dir(MemberEntry())))
print('MemberEntry.__doc__ => ' + MemberEntry.__doc__)
print('MemberEntry.__len__ => ' + str(len(MemberEntry())))
print('MemberEntry.__str__ => ' + str(MemberEntry()))
|
kizza/CSS-Less-ish | tests/testfuncs.py | Python | mit | 1,589 | 0.031466 | import sys, re
if sys.version_info < (3, 0):
import testcase
import modules.cssfuncs as funcs
else:
from . import testcase
from ..modules import cssfuncs as funcs
class TestFunctions(testcase.TestCase):
title = "CSS Functions"
def test_functions(self):
self.set_text( self.input() )
self.text_equals( self.input() )
self.compile()
self.find( re.escape(self.result()) )
self.decompile()
self.text_equals( self.input() )
def vars(self):
return """
/*
* @box-shadow = box-shadow(0 0 4px #ff0)
* @transition = transition(all 0.3s ease)
* @transform = transform(rotate(7.deg))
* @gradient1 = linear-gradient(#fff, #f0 | 0)
* @gradient2 = linear-gradient(to top, #fff, #f00)
* @gradient3 = linear-gradient(to bottom , #fff, #f00)
*/
"""
def input(self):
return self.vars()+"""
h1 {
@box-shadow;
@transform;
@transition;
@gradient1;
@gradient2;
@gradient3;
}
"""
def result(self):
return self.vars()+"""
h1 {
-webkit-box-shadow: 0 0 4px #ff0;
box-shadow: 0 0 4px #ff0;
-webkit-transform: rotate(7.deg);
-ms-transform: rotate(7.deg);
transform: rotate(7 | .deg);
-webkit-transition: all 0.3s ease;
transition: all 0.3s ease;
background-image: -webkit-linear-gradient(bottom, #fff, #f00);
background-image: linear-gradient(to top, #fff, #f00);
background-image: -webkit-linear-gradient(bottom, #fff, #f00);
background-image: linear-gradient(to top, #fff, #f00);
background-image: -webkit-linear-gradient(top, #fff, #f00);
background-image: linear-gradient(to bottom , #fff, #f00);
}
"""
|
t10471/python | practice/src/design_pattern/Interpreter.py | Python | mit | 2,299 | 0.005729 | # -*- coding: utf-8 -*-
import datetime
import os
#compsiteとcommandをあわせたような形
#ContextがhandlerでCommandが処理
class JobCommand(object):
def execute(self, context):
if context.getCurrentCommand() != 'begin':
raise Exception('illegal command ' + str(context.getCurrentCommand()))
command_list = CommandListCommand()
command_list.execute(context.next())
class CommandListCommand(object):
def execute(self, context):
while (True):
current_command = context.getCurrentCommand()
if current_command is None:
raise Exception('"end" not found ')
elif current_command == 'end':
break
else:
command = CommandCommand()
command.execute(context)
context.next()
class CommandCommand(object):
def execute(self, context):
current_command = context.getCurrentCommand()
if current_command == 'diskspace':
free_size = 10000 | 0000.0
max_size = 210000000.0
ratio = free_size / max_size * 100
print( 'Disk Free : %dMB (%.2f%%)' % (free_size / 1024 / 1024, | ratio))
elif current_command == 'date':
print datetime.datetime.today().strftime("%Y/%m/%d")
elif current_command == 'line':
print '--------------------'
else:
raise Exception('invalid command [' + str(current_command) + ']')
class Context(object):
def __init__(self, command):
self.commands = []
self.current_index = 0
self.max_index = 0
self.commands = command.strip().split()
print self.commands
self.max_index = len(self.commands)
def next(self):
self.current_index += 1
print self.current_index
return self
def getCurrentCommand(self):
if self.current_index > len(self.commands):
return None
return self.commands[self.current_index].strip()
def execute(command):
job = JobCommand()
try:
job.execute(Context(command))
except Exception, e:
print e.args
if __name__ == '__main__':
command = 'begin date line diskspace end'
if command != '':
execute(command)
|
pytorch/vision | setup.py | Python | bsd-3-clause | 20,786 | 0.002309 | import distutils.command.clean
import distutils.spawn
import glob
import os
import shutil
import subprocess
import sys
import torch
from pkg_resources import parse_version, get_distribution, DistributionNotFound
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
def read(*names, **kwargs):
with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def get_dist(pkgname):
try:
return get_distribution(pkgname)
except DistributionNotFound:
return None
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, "version.txt")
with open(version_txt) as f:
version = f.readline().strip()
sha = "Unknown"
package_name = "torchvision"
try:
sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
pass
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
def write_version_file():
version_path = os.path.join(cwd, "torchvision", "version.py")
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = {repr(sha)}\n")
f.write("from torchvision.extension import _check_cuda_version\n")
f.write("if _check_cuda_version() > 0:\n")
f.write(" cuda = _check_cuda_version()\n")
pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")
requirements = [
"typing_extensions",
"numpy",
"requests",
pytorch_dep,
]
# Excluding 8.3.* because of https://github.com/pytorch/vision/issues/4934
pillow_ver = " >= 5.3.0, !=8.3.*"
pillow_req = "pillow-simd" if get_dist("pillow-simd") is not None else "pillow"
requirements.append(pillow_req + pillow_ver)
def find_library(name, vision_include):
this_dir = os.path.dirname(os.path.abspath(__file__))
build_prefix = os.environ.get("BUILD_PREFIX", None)
is_conda_build = build_prefix is not None
library_found = False
conda_installed = False
lib_folder = None
include_folder = None
library_header = f"{name}.h"
# Lookup in TORCHVISION_INCLUDE or in the package file
package_path = [os.path.join(this_dir, "torchvision")]
for folder in vision_include + package_path:
candidate_path = os.path.join(folder, library_header)
library_found = os.path.exists(candidate_path)
if library_found:
break
if not library_found:
print(f"Running build on conda-build: {is_conda_build}")
if is_conda_build:
# Add conda headers/libraries
if os.name == "nt":
build_prefix = os.path.join(build_prefix, "Library")
include_folder = os.path.join(build_prefix, "include")
lib_folder = os.path.join(build_prefix, "lib")
library_header_path = os.path.join(include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
else:
# Check if using Anaconda to produce wheels
conda = distutils.spawn.f | ind_executable("conda")
is_conda = conda is not None
print(f"Running build on conda: {is_conda}")
if is_conda:
python_executable = sys.executable
py_folder = os.path.dirname(python_executable)
| if os.name == "nt":
env_path = os.path.join(py_folder, "Library")
else:
env_path = os.path.dirname(py_folder)
lib_folder = os.path.join(env_path, "lib")
include_folder = os.path.join(env_path, "include")
library_header_path = os.path.join(include_folder, library_header)
library_found = os.path.isfile(library_header_path)
conda_installed = library_found
if not library_found:
if sys.platform == "linux":
library_found = os.path.exists(f"/usr/include/{library_header}")
library_found = library_found or os.path.exists(f"/usr/local/include/{library_header}")
return library_found, conda_installed, include_folder, lib_folder
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "torchvision", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + glob.glob(
os.path.join(extensions_dir, "ops", "*.cpp")
)
source_cpu = (
glob.glob(os.path.join(extensions_dir, "ops", "autograd", "*.cpp"))
+ glob.glob(os.path.join(extensions_dir, "ops", "cpu", "*.cpp"))
+ glob.glob(os.path.join(extensions_dir, "ops", "quantized", "cpu", "*.cpp"))
)
is_rocm_pytorch = False
if torch.__version__ >= "1.5":
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)
if is_rocm_pytorch:
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="torchvision/csrc/ops/cuda/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "hip", "*.hip"))
# Copy over additional files
for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"):
shutil.copy(file, "torchvision/csrc/ops/hip")
else:
source_cuda = glob.glob(os.path.join(extensions_dir, "ops", "cuda", "*.cu"))
source_cuda += glob.glob(os.path.join(extensions_dir, "ops", "autocast", "*.cpp"))
sources = main_file + source_cpu
extension = CppExtension
compile_cpp_tests = os.getenv("WITH_CPP_MODELS_TEST", "0") == "1"
if compile_cpp_tests:
test_dir = os.path.join(this_dir, "test")
models_dir = os.path.join(this_dir, "torchvision", "csrc", "models")
test_file = glob.glob(os.path.join(test_dir, "*.cpp"))
source_models = glob.glob(os.path.join(models_dir, "*.cpp"))
test_file = [os.path.join(test_dir, s) for s in test_file]
source_models = [os.path.join(models_dir, s) for s in source_models]
tests = test_file + source_models
tests_include_dirs = [test_dir, models_dir]
define_macros = []
extra_compile_args = {"cxx": []}
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
"FORCE_CUDA", "0"
) == "1":
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [("WITH_CUDA", None)]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
else:
define_macros += [("WITH_HIP", None)]
nvcc_flags = []
extra_compile_args["nvcc"] = nvcc_flags
if sys.platform == "win32":
define_macros += [("torchvision_EXPORTS", None)]
define_macros += [("USE_PYTHON", None)]
extra_compile_args["cxx"].append("/MP")
debug_mode = os.getenv("DEBUG", "0") == "1"
if debug_mode:
print("Compile in debug mode")
extra_compile_args["cxx"].append("-g")
extra_compile_args["cxx"].append("-O0")
if "nvcc" in extra_compile_args:
# we have to remove "-OX" and "-g" flag if exists and append
nvcc_flags = extra_compile_args["nvcc"]
extra_compile_args["nvcc"] = [f for f in nvcc_flags if not ("-O" in f or "-g" in f)]
extra_compile_args["nvcc"].append("-O0")
extra_compile_args["nvcc"].append("-g")
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"torchvision._C",
|
loic/django | tests/gis_tests/inspectapp/models.py | Python | bsd-3-clause | 713 | 0 | from ..models i | mport models
class AllOGRFields(models.Model):
f_decimal = models.FloatField()
f_float = models.FloatField()
f_int = models.IntegerField()
f_char = models.CharField(max_length=10)
f_date = models.DateField()
f_datetime = models.DateTimeField()
f_time = models.TimeField()
geom = models.PolygonField()
point = models.PointField()
class Meta:
required_db_features = ['gis_enabled']
class Fields3D(models.Model):
point = mo | dels.PointField(dim=3)
pointg = models.PointField(dim=3, geography=True)
line = models.LineStringField(dim=3)
poly = models.PolygonField(dim=3)
class Meta:
required_db_features = ['gis_enabled']
|
xbash/LabUNAB | 11_diccionarios/0517_diccionarios3.py | Python | gpl-3.0 | 1,470 | 0.038095 | #python27
#declaracion de diccionario
agenda = {'Juan':12 | 3, 'Jose':456, 'Jorge':789, 'Juana':012, 'Joyce':345}
def modificar():
print '...modificando dato de...'
print '\n[1] Juan'
print '\n[2] Jose'
print '\n[3] Jorge'
print '\n[4] Juanita'
print '\n[5] Joyce'
opcion = input('Ingresa la opcion')
if opcion == 1:
a = input('Ingresar un nuevo numero telefonico: ')
if opcion == 2:
b = input('Ingresar un nuevo numero telefonico: ')
if opcion == 3:
c = input('Ingresar un nuevo numero telefoni | co: ')
if opcion == 4:
d = input('Ingresar un nuevo numero telefonico: ')
if opcion == 5:
e = input('Ingresar un nuevo numero telefonico: ')
##el valor de la variable cambia por segun lo modificado
agenda = {'Juan':a, 'Jose':b,'Jorge':c, 'Juanita':d, 'Joyce':e}
def contacto():
print '...'
menu()
def search():
print '...'
print '\n[1] Juan'
print '\n[2] Jose'
print '\n[3] Jorge'
print '\n[4] Juanita'
print '\n[5] Joyce'
def salid():
print '\n Saliendo....'
def menu():
print '*'*40
print '\tMenu'
print '*'*40
print '\n[1] Modificar numeros telefonicos'
print '\n[2] Mostrar numero'
print '\n[3] Buscar datos de contacto'
print '\n[4] Salir'
opcion = inpput('Ingresar una opcion')
if opcion == 1:
modificar()
if opcion == 2:
contacto()
if opcion == 3:
search()
if opcion == 4:
salir()
menu()
|
riklaunim/django-custom-multisite | django/db/models/manager.py | Python | bsd-3-clause | 8,241 | 0.001942 | import copy
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
def ensure_default_manager(sender, **kwargs):
"""
Ensures that a Model subclass contains a default manager and sets the
_default_manager attribute on the class. Also sets up the _base_manager
points to a plain Manager instance (which could be the same as
_default_manager if it's not a subclass of Manager).
"""
cls = sender
if cls._meta.abstract:
return
if not getattr(cls, '_default_manager', None):
# Create the default manager, if needed.
try:
cls._meta.get_field('objects')
raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
except FieldDoesNotExist:
pass
cls.add_to_class('objects', Manager())
cls._base_manager = cls.objects
elif not getattr(cls, '_base_manager', None):
default_mgr = cls._default_manager.__class__
if (default_mgr is Manager or
getattr(default_mgr, "use_for_related_fields", False)):
cls._base_manager = cls._default_manager
else:
# Default manager isn't a plain Manager class, or a suitable
# replacement, so we walk up the base class hierarchy until we hit
# something appropriate.
for base_class in default_mgr.mro()[1:]:
if (base_class is Manager or
getattr(base_class, "use_for_related_fields", False)):
cls.add_to_class('_base_manager', base_class())
return
raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
signals.class_prepared.connect(ensure_default_manager)
class Manager(object):
# Tracks each time a Manager instance is created. Used to retain order.
creation_counter = 0
def __init__(self):
super(Manager, self).__init__()
self._set_creation_counter()
self.model = None
self._inherited = False
self._db = None
def contribute_to_class(self, model, name):
# TODO: Use weakref because of possible memory leak / circular reference.
self.model = model
setattr(model, name, ManagerDescriptor(self))
if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
model._default_manager = self
if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
model._meta.abstract_managers.append((self.creation_counter, name,
self))
else:
model._meta.concrete_managers.append((self.creation_counter, name,
self))
def _set_creation_counter(self):
"""
Sets the creation counter value for this instance and increments the
class-level copy.
"""
self.creation_counter = Manager.creation_counter
Manager.creation_counter += 1
def _copy_to_model(self, model):
"""
Makes a copy of the manager and assigns it to 'model', which should be
a child of the existing model (used when inheriting a manager from an
abstract base class).
"""
assert issubclass(model, self.model)
mgr = copy.copy(self)
mgr._set_creation_counter()
mgr.model = model
mgr._inherited = True
return mgr
def db_manager(self, using):
obj = copy.copy(self)
obj._db = using
return obj
@property
def db(self):
return self._db or router.db_for_read(self.model)
#######################
# PROXIES TO QUERYSET #
#######################
def get_empty_query_set(self):
return EmptyQuerySet(self.model, using=self._db)
def get_query_set(self):
"""Returns a new QuerySet object. Subclasses can override this method
to easily customize the behavior of the Manager.
"""
return QuerySet(self.model, using=self._db)
def none(self):
return self.get_empty_query_set()
def all(self):
return self.get_query_set()
def count(self):
return self.get_query_set().count()
def dates(self, *args, **kwargs):
return self.get_query_set().dates(*args, **kwargs)
def distinct(self, *args, **kwargs):
return self.get_query_set().distinct(*args, **kwargs)
def extra(self, *args, **kwargs):
return self.get_query_set().extra(*args, **kwargs)
def get(self, *args, **kwargs):
return self.get_query_set().get(*args, **kwargs)
def get_or_create(self, **kwargs):
return self.get_query_set().get_or_create(**kwargs)
def create(self, **kwargs):
return self.get_query_set().create(**kwargs)
def bulk_create(self, *args, **kwargs):
return self.get_query_set().bulk_create(*args, **kwargs)
def filter(self, *args, **kwargs):
return self.get_query_set().filter(*args, **kwargs)
def aggregate(self, *args, **kwargs):
return self.get_query_set().aggregate(*args, **kwargs)
def annotate(self, *args, **kwargs):
return self.get_query_set().annotate(*args, **kwargs)
def complex_filter(self, *args, **kwargs):
return self.get_query_set().complex_filter(*args, **kwargs)
def exclude(self, *args, **kwargs):
return self.get_query_set().exclud | e(*args, **kwargs)
def in_bulk(self, *args, **kwargs):
return self.get_query_set().in_bulk(*args, **kwargs)
def iterator(s | elf, *args, **kwargs):
return self.get_query_set().iterator(*args, **kwargs)
def latest(self, *args, **kwargs):
return self.get_query_set().latest(*args, **kwargs)
def order_by(self, *args, **kwargs):
return self.get_query_set().order_by(*args, **kwargs)
def select_for_update(self, *args, **kwargs):
return self.get_query_set().select_for_update(*args, **kwargs)
def select_related(self, *args, **kwargs):
return self.get_query_set().select_related(*args, **kwargs)
def prefetch_related(self, *args, **kwargs):
return self.get_query_set().prefetch_related(*args, **kwargs)
def values(self, *args, **kwargs):
list(self.get_query_set())
return self.get_query_set().values(*args, **kwargs)
def values_list(self, *args, **kwargs):
list(self.get_query_set())
return self.get_query_set().values_list(*args, **kwargs)
def update(self, *args, **kwargs):
return self.get_query_set().update(*args, **kwargs)
def reverse(self, *args, **kwargs):
return self.get_query_set().reverse(*args, **kwargs)
def defer(self, *args, **kwargs):
return self.get_query_set().defer(*args, **kwargs)
def only(self, *args, **kwargs):
return self.get_query_set().only(*args, **kwargs)
def using(self, *args, **kwargs):
return self.get_query_set().using(*args, **kwargs)
def exists(self, *args, **kwargs):
return self.get_query_set().exists(*args, **kwargs)
def _insert(self, objs, fields, **kwargs):
return insert_query(self.model, objs, fields, **kwargs)
def _update(self, values, **kwargs):
return self.get_query_set()._update(values, **kwargs)
def raw(self, raw_query, params=None, *args, **kwargs):
return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)
class ManagerDescriptor(object):
# This class ensures managers aren't accessible via model instances.
# For example, Poll.objects works, but poll_obj.objects raises AttributeError.
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, type=None):
if instance != None:
raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
return self.manager
class EmptyManager(Manager):
def ge |
makcedward/nlpaug | nlpaug/model/lang_models/machine_translation_transformers.py | Python | mit | 3,112 | 0.001928 | try:
import torch
from torch.utils import data as t_data
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
except ImportError:
# No installation required if not using this function
pass
from nlpaug.model.lang_models import LanguageModels
class MtTransformers(LanguageModels):
def __init__(self, src_model_name='facebook/wmt19-en-de', tgt_model_name='facebook/wmt19-de-en',
device='cuda', silence=True, batch_size=32, max_length=None):
super().__init__(device, model_type=None, silence=silence)
try:
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
except ModuleNotFoundError:
raise ModuleNotFoundError('Missed transformers library. Install transfomers by `pip install transformers`')
self.src_model_name = src_model_name
self.tgt_model_name = tgt_model_name
self.src_model = AutoModelForSeq2SeqLM.from_pretrained(self.src_model_name)
self.src_model.eval()
self.src_model.to(device)
self.tgt_model = AutoModelForSeq2SeqLM.from_pretrained(self.tgt_model_name)
self.tgt_model.eval()
self.tgt_model.to(device)
self.src_tokenizer = AutoTokenizer.from_pretrained(self.src_model_name)
self.tgt_tokenizer = AutoTokenizer.from_pretrained(self.tgt_model_name)
self.batch_size = batch_size
self.max_length = max_length
def get_device(self):
return str(self.src_model.device)
def predict(self, texts, target_words=None, n=1):
src_translated_texts = self.translate_one_step_batched(texts, self.src_tokenizer, self.src_model)
tgt_translated_texts = self.translate_one_step_batched(src_translated_texts, self.tgt_tokenizer, self.tgt_model)
return tgt_translated_texts
def translate_one_step_batched(
self, data, tokenizer, model
):
tokenized_texts = tokenizer(data, padding=True, return_tensors='pt')
tokenized_dataset = t_data.TensorDataset(*(tokenized_texts.values()))
tokenized_dataloader = t_data.DataLoader(
tokenized_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1
)
all_translated_ids = []
with torch.no_grad():
for batch in tokenized_dataloader:
batch = tuple(t.to(self.device) for t in batch)
input_ids, attention_mask = batch
translated_ids_batch = model.generate(
input_ids=input_ids, attention_mask=attention_mask,
max_length=self.max_length
)
| all_translated_ids.append(
translated_ids_batch.detach().cpu().numpy()
)
all_translated_texts = []
for translated_ids_batch in all_translated_ids:
translated | _texts = tokenizer.batch_decode(
translated_ids_batch,
skip_special_tokens=True
)
all_translated_texts.extend(translated_texts)
return all_translated_texts
|
DependencyWatcher/parser | dependencywatcher/parser/sbt.py | Python | apache-2.0 | 2,074 | 0.009643 | from dependencywatcher.parser.parser import Parser
from dependencywatcher.parser.pyparser import PyParser
from pyparsing import *
class SbtParser(PyParser):
def __init__(self, source):
super(SbtParser, self).__init__(source)
vars = {}
def store_var(st, locn, tokens):
vars[tokens[0]] = tokens[1]
return tokens
def resolve_var(st, locn, tokens):
try:
tokens[0][2] = vars[tokens[0][2]]
except KeyError:
pass
return tokens
VersionSpec = Suppress(Literal("\"")) + Regex("[\[\]\(]?[0-9][-A-Za-z0-9_.\+\,]+[\[\]\)]?") + Suppress(Literal("\""))
ScalaIdentifier = Regex("[a-zA-Z][A-Za-z0-9_.-]+")
VarKeyword = Literal("val") | Literal("var")
VarDeclaration = Suppress(VarKeyword) + ScalaIdentifier + Suppress(Literal("=")) + VersionSpec
VarDeclaration.setParseAction(store_var)
ArtifactIdentifier = Suppress(Literal("\"")) + Regex("[a-zA-Z][A-Za-z0-9_.-]+") + Suppress(Literal("\""))
VarDependency = Gr | oup(ArtifactIdentifier + Suppress(Literal("%")) + ArtifactIdentifier + Suppress(Literal("%")) + ScalaIdentifier)
VarDependency.setParseAction(resolve_var)
Dependency = Group(Artifac | tIdentifier + Suppress(Literal("%")) + ArtifactIdentifier + Suppress(Literal("%")) + VersionSpec)
self.parser = VarDeclaration | VarDependency | Dependency
self.parser.ignore(javaStyleComment)
self.parser.setParseAction(self.defaultParseAction)
def parse(self, dependencies):
data = self.parser.searchString(self.source.get_content())
for d in data.asList():
for token in d:
if len(token.data) == 3:
dep_name = "%s:%s" % (token.data[0], token.data[1])
version = token.data[2]
if version:
dependencies.append({"name": dep_name, "version": version, "context": "java", "line": token.line})
Parser.register_parser([".*\.sbt"], SbtParser)
|
111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_tunnel_l2tun_oper.py | Python | apache-2.0 | 488,131 | 0.020519 | """ Cisco_IOS_XR_tunnel_l2tun_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-l2tun package operational data.
This module contains definitions
for the following management objects\:
l2tp\: L2TP operational data
l2tpv2\: l2tpv2
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class DigestHashEnum(Enum):
"""
DigestHashEnum
Digest hash types
.. data:: md5 = 0
MD5
.. data:: sha1 = 1
SHA1
"""
md5 = 0
sha1 = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['DigestHashEnum']
class L2Tp(object):
"""
L2TP operational data
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Classes>`
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.CounterHistFail>`
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters>`
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Session>`
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tu | n_oper.L2Tp.Sessions>`
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations>`
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Tunnels>`
"""
_p | refix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.classes = L2Tp.Classes()
self.classes.parent = self
self.counter_hist_fail = L2Tp.CounterHistFail()
self.counter_hist_fail.parent = self
self.counters = L2Tp.Counters()
self.counters.parent = self
self.session = L2Tp.Session()
self.session.parent = self
self.sessions = L2Tp.Sessions()
self.sessions.parent = self
self.tunnel_configurations = L2Tp.TunnelConfigurations()
self.tunnel_configurations.parent = self
self.tunnels = L2Tp.Tunnels()
self.tunnels.parent = self
class Counters(object):
"""
L2TP control messages counters
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.control = L2Tp.Counters.Control()
self.control.parent = self
class Control(object):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr>`
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tunnel_xr = L2Tp.Counters.Control.TunnelXr()
self.tunnel_xr.parent = self
self.tunnels = L2Tp.Counters.Control.Tunnels()
self.tunnels.parent = self
class TunnelXr(object):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication>`
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.authentication = L2Tp.Counters.Control.TunnelXr.Authentication()
self.authentication.parent = self
self.global_ = L2Tp.Counters.Control.TunnelXr.Global_()
self.global_.parent = self
class Authentication(object):
"""
Tunnel authentication counters
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.CommonDigest>`
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.LocalSecret>`
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.NonceAvp>`
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios |
CoderBounty/coderbounty | website/migrations/0006_auto_20151115_0005.py | Python | agpl-3.0 | 397 | 0 | # -*- coding: | utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0005_auto_20151107_2021'),
]
operations = [
migrations.AddField(
model_name='issue',
name='views',
field=models.IntegerField(default=1),
),
]
| |
jeremyplichta/pi-halloween | door.py | Python | mit | 2,164 | 0.004159 | import threading
import logging
from os import listdir
from os.path import isfile, join
import random
import time
import time
import RPi.GPIO as G | PIO
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(asctime)s %(message)s',
)
class Door(threading.Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
threading.Thread.__init__(self, group=group, target=target, name=name,
verbose=verbose)
self.args = args
self.doorcallback = args[0]
| self.closecallback = args[1]
self.closedevent = args[2]
self.closedcount = 0
self.gpio_door = kwargs['gpio_door']
self.shouldstop = threading.Event()
self.kwargs = kwargs
self.closed = True
return
def stop(self):
self.shouldstop.set()
def setup(self):
# Set pins as output and input
GPIO.setup(self.gpio_door,GPIO.IN)
def run(self):
self.setup()
threshold = 40
while True:
if self.shouldstop.isSet():
logging.debug('exiting door thread')
return
# logging.debug('Door state: {} closedcount={}'.format(GPIO.input(self.gpio_door), self.closedcount))
if GPIO.input(self.gpio_door) == GPIO.LOW: # Check whether the button is pressed or not.
self.closedcount = min(threshold,self.closedcount + 1)
if not self.closed and self.closedcount == threshold:
self.closed = True
self.closedevent.set()
logging.info('Door Closed')
if self.closecallback:
self.closecallback()
else:
self.closedcount = 0
if self.closed:
self.closed = False
self.closedevent.clear()
logging.info('Door Open')
if self.doorcallback:
self.doorcallback()
time.sleep(.05)
|
janusnic/21v-python | unit_13/qcombobox.py | Python | mit | 796 | 0.023869 | from PyQt4 import QtGui
from PyQt | 4 import QtCore
from PyQt4.QtCore import pyqtSlot,SIGNAL,SLOT
import sys
class myMainWindow(QtGui.QMainWindow):
@pyqtSlot(int)
def onIndexChange(self, i):
print i
def main():
app = QtGui.QApplication(sys.argv)
window = myMainWindow()
palette = QtGui.QPalette()
comboBox = QtGui.QComboBox()
comboBox.addItem("Item 1")
comboBox.addItem("Item 2")
comboBox.addItem("Item 3")
window.setCentralWidget(comboB | ox)
comboBox.connect(comboBox,SIGNAL("currentIndexChanged(int)"),
window,SLOT("onIndexChange(int)"))
window.setWindowTitle('PyQt QComboBox CurrentIndexChange Example')
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() |
timgrossmann/dailyProgrammer | Challenges/Python/decodeWebPage.py | Python | mit | 698 | 0.012894 | import requests, os
from bs4 import BeautifulSoup
url = 'http://www.nytimes.com'
def extractArticles (url): |
data = requests.get(url)
soup = BeautifulSoup(data.text, 'html.parser')
articles = []
for article in soup.find_all('arti | cle'):
if article.find('h2') != None and article.find('h2').find('a') != None:
heading = article.find('h2').find('a').get_text().strip()
if heading != "":
articles.append(heading)
articles = sorted(list(set(articles)))
f = open('./articles/headlines2.txt', 'w')
for heading in articles:
f.write(heading)
f.write('\n')
f.close()
extractArticles(url) |
stevepiercy/readthedocs.org | readthedocs/core/middleware.py | Python | mit | 8,107 | 0.001357 | import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
| for domain in domains:
if domain.domain == host:
request.slug = domain.project.slug
request.urlconf = 'core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
| domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
# Cache the slug -> host mapping permanently.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class SingleVersionMiddleware(object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if (getattr(proj, 'single_version', False) and
not getattr(settings, 'USE_SUBDOMAIN', False)):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
# Forked from old Django
class ProxyMiddleware(object):
"""
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
|
kevana/phonedusk-server | tests/test_models.py | Python | bsd-3-clause | 1,682 | 0.001189 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from phonedusk.user.models import User, Role
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
def test_get_by_id(self):
user = User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
user = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
user | = UserFactory(password="myprecious")
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
user = User.create(username="foo", email="foo@bar.com",
password="foobarbaz123")
assert user.check_password('foob | arbaz123') is True
assert user.check_password("barfoobaz") is False
def test_full_name(self):
user = UserFactory(first_name="Foo", last_name="Bar")
assert user.full_name == "Foo Bar"
def test_roles(self):
role = Role(name='admin')
role.save()
u = UserFactory()
u.roles.append(role)
u.save()
assert role in u.roles
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.