| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/17-futures/countries/flags2_common.py | 1 | 5430 |
"""Utilities for second set of flag examples.
"""
import os
import time
import sys
import string
import argparse
from collections import namedtuple
from enum import Enum
Result = namedtuple('Result', 'status data')
HTTPStatus = Enum('Status', 'ok not_found error')
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
DEFAULT_CONCUR_REQ = 1
MAX_CONCUR_REQ = 1
SERVERS = {
'REMOTE': 'http://flupy.org/data/flags',
'LOCAL': 'http://localhost:8001/flags',
'DELAY': 'http://localhost:8002/flags',
'ERROR': 'http://localhost:8003/flags',
}
DEFAULT_SERVER = 'LOCAL'
DEST_DIR = 'downloads/'
COUNTRY_CODES_FILE = 'country_codes.txt'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
def initial_report(cc_list, actual_req, server_label):
if len(cc_list) <= 10:
cc_msg = ', '.join(cc_list)
else:
cc_msg = 'from {} to {}'.format(cc_list[0], cc_list[-1])
print('{} site: {}'.format(server_label, SERVERS[server_label]))
msg = 'Searching for {} flag{}: {}'
plural = 's' if len(cc_list) != 1 else ''
print(msg.format(len(cc_list), plural, cc_msg))
plural = 's' if actual_req != 1 else ''
msg = '{} concurrent connection{} will be used.'
print(msg.format(actual_req, plural))
def final_report(cc_list, counter, start_time):
elapsed = time.time() - start_time
print('-' * 20)
msg = '{} flag{} downloaded.'
plural = 's' if counter[HTTPStatus.ok] != 1 else ''
print(msg.format(counter[HTTPStatus.ok], plural))
if counter[HTTPStatus.not_found]:
print(counter[HTTPStatus.not_found], 'not found.')
if counter[HTTPStatus.error]:
plural = 's' if counter[HTTPStatus.error] != 1 else ''
print('{} error{}.'.format(counter[HTTPStatus.error], plural))
print('Elapsed time: {:.2f}s'.format(elapsed))
def expand_cc_args(every_cc, all_cc, cc_args, limit):
codes = set()
A_Z = string.ascii_uppercase
if every_cc:
codes.update(a+b for a in A_Z for b in A_Z)
elif all_cc:
with open(COUNTRY_CODES_FILE) as fp:
text = fp.read()
codes.update(text.split())
else:
for cc in (c.upper() for c in cc_args):
if len(cc) == 1 and cc in A_Z:
codes.update(cc+c for c in A_Z)
elif len(cc) == 2 and all(c in A_Z for c in cc):
codes.add(cc)
else:
msg = 'each CC argument must be A to Z or AA to ZZ.'
raise ValueError('*** Usage error: '+msg)
return sorted(codes)[:limit]
def process_args(default_concur_req):
server_options = ', '.join(sorted(SERVERS))
parser = argparse.ArgumentParser(
description='Download flags for country codes. '
'Default: top 20 countries by population.')
parser.add_argument('cc', metavar='CC', nargs='*',
help='country code or 1st letter (eg. B for BA...BZ)')
parser.add_argument('-a', '--all', action='store_true',
help='get all available flags (AD to ZW)')
parser.add_argument('-e', '--every', action='store_true',
help='get flags for every possible code (AA...ZZ)')
parser.add_argument('-l', '--limit', metavar='N', type=int,
help='limit to N first codes', default=sys.maxsize)
parser.add_argument('-m', '--max_req', metavar='CONCURRENT', type=int,
default=default_concur_req,
help='maximum concurrent requests (default={})'
.format(default_concur_req))
parser.add_argument('-s', '--server', metavar='LABEL',
default=DEFAULT_SERVER,
help='Server to hit; one of {} (default={})'
.format(server_options, DEFAULT_SERVER))
parser.add_argument('-v', '--verbose', action='store_true',
help='output detailed progress info')
args = parser.parse_args()
if args.max_req < 1:
print('*** Usage error: --max_req CONCURRENT must be >= 1')
parser.print_usage()
sys.exit(1)
if args.limit < 1:
print('*** Usage error: --limit N must be >= 1')
parser.print_usage()
sys.exit(1)
args.server = args.server.upper()
if args.server not in SERVERS:
print('*** Usage error: --server LABEL must be one of',
server_options)
parser.print_usage()
sys.exit(1)
try:
cc_list = expand_cc_args(args.every, args.all, args.cc, args.limit)
except ValueError as exc:
print(exc.args[0])
parser.print_usage()
sys.exit(1)
if not cc_list:
cc_list = sorted(POP20_CC)
return args, cc_list
def main(download_many, default_concur_req, max_concur_req):
args, cc_list = process_args(default_concur_req)
actual_req = min(args.max_req, max_concur_req, len(cc_list))
initial_report(cc_list, actual_req, args.server)
base_url = SERVERS[args.server]
t0 = time.time()
counter = download_many(cc_list, base_url, args.verbose, actual_req)
assert sum(counter.values()) == len(cc_list), \
'some downloads are unaccounted for'
final_report(cc_list, counter, t0)
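# --- Editor's sketch, not part of the original file ---
# main() above expects a download_many(cc_list, base_url, verbose, concur_req)
# callable that returns a collections.Counter keyed by HTTPStatus and covering
# every requested country code. A minimal sequential version, assuming the
# third-party `requests` package and a <base_url>/<cc>/<cc>.gif layout:
from collections import Counter
import requests
def download_many(cc_list, base_url, verbose, concur_req):
    counter = Counter()
    for cc in sorted(cc_list):
        url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower())
        resp = requests.get(url)
        if resp.status_code == 200:
            save_flag(resp.content, cc.lower() + '.gif')
            counter[HTTPStatus.ok] += 1
        elif resp.status_code == 404:
            counter[HTTPStatus.not_found] += 1
        else:
            counter[HTTPStatus.error] += 1
        if verbose:
            print(cc, counter)
    return counter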
| gpl-3.0 | 4,697,844,028,749,351,000 | 34.442953 | 75 | 0.573849 | false | 3.480769 | false | false | false |
| blackshirt/simpletrain | models.py | 1 | 1532 |
'''
Database Models
Models defined here
===================
1. User
2. Employee
3. Workplace
4. Training
5. Letter
6. LearningAssignment
'''
import os
from datetime import date, datetime
from pony.orm import Database, sql_debug
from pony.orm import Required, Optional, Set
__all__ = ['db', 'User', 'Employee', 'Workplace', 'Training', 'Letter',
'LearningAssignment']
db = Database()
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
support_dir = os.path.join(dir_path, "support")
class Base(db.Entity):
name = Required(str)
class User(Base):
email = Optional(str)
password = Optional(str)
dob = Optional(date)
pob = Optional(str)
utype = Optional(str, default='guest')
class Employee(User):
nip = Required(str)
class Workplace(Base):
address = Optional(str)
city = Optional(str)
class SendRecvMixin(User, Workplace):
pass
class Sender(SendRecvMixin):
letter = Set("Letter")
class Receiver(SendRecvMixin):
letter = Set("Letter")
class Letter(db.Entity):
about = Required(str)
date = Optional(date)
number = Optional(str)
sender = Optional(Sender)
receiver = Set(Receiver)
inputed_at = Optional(datetime, default=datetime.now)
last_updated = Optional(datetime, default=datetime.now)
class Training(db.Entity):
title = Required(str)
class LearningAssignment(db.Entity):
about = Required(str)
sql_debug(True)
db.bind("sqlite", ":memory:", create_db=True)
db.generate_mapping(create_tables=True)
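# --- Editor's sketch, not part of the original file ---
# A quick smoke test of the mapping defined above, run inside a Pony
# db_session; the sample values are hypothetical.
if __name__ == '__main__':
    from pony.orm import db_session
    with db_session:
        alice = User(name='Alice', email='alice@example.com', utype='admin')
        office = Workplace(name='Head Office', city='Jakarta')
        print(alice.name, 'works at', office.city)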
| mit | -565,255,332,013,163,500 | 17.238095 | 71 | 0.673629 | false | 3.359649 | false | false | false |
| belatrix/BackendAllStars | activities/urls.py | 1 | 1168 |
from .views import send_message_all, send_message_to, send_message_location, send_message_event
from .views import get_activities, get_messages, get_messages_from, get_messages_from_all, get_notifications
from django.conf.urls import url
urlpatterns = [
url(r'^send/message/all/$', send_message_all, name='send_message_all'),
url(r'^send/message/to/(?P<employee_username>\w+)/$', send_message_to, name='send_message_to'),
url(r'^send/message/location/(?P<location_id>\d+)/$', send_message_location, name='send_message_location'),
url(r'^send/message/event/(?P<event_id>\d+)/$', send_message_event, name='send_message_event'),
url(r'^get/activity/employee/(?P<employee_id>\d+)/all/$', get_activities, name='get_activities'),
url(r'^get/message/employee/(?P<employee_id>\d+)/all/$', get_messages, name='get_messages'),
url(r'^get/message/from/employee/all/$', get_messages_from_all, name='get_messages_from_all'),
url(r'^get/message/from/employee/(?P<employee_id>\d+)/all/$', get_messages_from, name='get_messages_from'),
url(r'^get/notification/employee/(?P<employee_id>\d+)/all/$', get_notifications, name='get_notifications'),
]
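# --- Editor's sketch, not part of the original file ---
# These patterns are meant to be mounted from a project-level urls.py; the
# 'activities/' prefix below is a hypothetical choice.
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^activities/', include('activities.urls')),
#     ]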
| apache-2.0 | 4,920,638,106,448,543,000 | 72 | 111 | 0.696062 | false | 3.208791 | false | true | false |
| jktubs/ctSESAM-python-memorizing | Crypter.py | 1 | 1892 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Encryption and decryption module.
"""
from Crypto.Cipher import AES
from hashlib import pbkdf2_hmac
class Crypter:
"""
Encrypt and decrypt with AES in CBC mode with PKCS7 padding. The constructor calculates the key from the given
password and salt with PBKDF2 using HMAC with SHA512 and 32768 iterations.
"""
def __init__(self, salt, password):
self.iv = b'\xb5\x4f\xcf\xb0\x88\x09\x55\xe5\xbf\x79\xaf\x37\x71\x1c\x28\xb6'
self.key = pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 32768)[:32]
@staticmethod
def add_pkcs7_padding(data):
"""
Adds PKCS7 padding so it can be divided into full blocks of 16 bytes.
:param bytes data: data without padding
:return: padded data
:rtype: bytes
"""
length = 16 - (len(data) % 16)
data += bytes([length])*length
return data
def encrypt(self, data):
"""
Encrypts with AES in CBC mode with PKCS7 padding.
:param bytes data: data for encryption
:return: encrypted data
:rtype: bytes
"""
aes_object = AES.new(self.key, AES.MODE_CBC, self.iv)
return aes_object.encrypt(self.add_pkcs7_padding(data))
@staticmethod
def remove_pkcs7_padding(data):
"""
Removes the PKCS7 padding.
:param bytes data: padded data
:return: data without padding
:rtype: bytes
"""
return data[:-data[-1]]
def decrypt(self, encrypted_data):
"""
Decrypts with AES in CBC mode with PKCS7 padding.
:param bytes encrypted_data: encrypted data
:return: decrypted data
:rtype: bytes
"""
aes_object = AES.new(self.key, AES.MODE_CBC, self.iv)
return self.remove_pkcs7_padding(aes_object.decrypt(encrypted_data))
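# --- Editor's sketch, not part of the original module ---
# A simple encrypt/decrypt round trip; the salt and password are hypothetical.
if __name__ == '__main__':
    crypter = Crypter(b'example-salt', 'correct horse battery staple')
    ciphertext = crypter.encrypt(b'attack at dawn')
    assert crypter.decrypt(ciphertext) == b'attack at dawn'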
| gpl-3.0 | -7,594,151,041,902,287,000 | 28.5625 | 114 | 0.609937 | false | 3.680934 | false | false | false |
| stormi/tsunami | src/secondaires/familier/fiche.py | 1 | 5370 |
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe FicheFamilier, détaillée plus bas."""
from abstraits.obase import BaseObj
from secondaires.familier.script import ScriptFiche
class FicheFamilier(BaseObj):
"""Classe représentant une fiche de familier.
Un familier est défini sur une fiche, tout comme les PNJ sont
définis sur les prototypes ou comme les matelots sont définis
sur des fiches de matelots. La fiche contient des informations
générales sur le PNJ (chaque prototype de PNJ a une fiche).
Par exemple, un cheval (prototype de PNJ 'cheval') peut avoir
une fiche de familier ('cheval'). Dans cette fiche, il est déclaré
que le cheval est un herbivore. Tous les chevau créés sur le
prototype 'cheval' ('cheval_0', 'cheval_1', 'cheval_2', ...)
pourront être des familiers qui utiliseront alors cette fiche.
"""
enregistrer = True
nom_scripting = "familier"
type_achat = "familier"
def __init__(self, cle):
"""Constructeur de la fiche."""
BaseObj.__init__(self)
self.cle = cle
self.regime = "herbivore"
self.monture = False
self.sorties_verticales = False
self.aller_interieur = False
self.stats_progres = ["force", "agilite", "robustesse", "intelligence"]
self.difficulte_apprivoisement = 10
self.harnachements = []
self.m_valeur = 50
self.script = ScriptFiche(self)
def __getnewargs__(self):
return ("", )
def __repr__(self):
return "<FicheFamilier {}>".format(self.cle)
def __str__(self):
return self.cle
@property
def prototype(self):
"""Retourne le prototype de PNJ associé."""
return importeur.pnj.prototypes.get(self.cle)
@property
def familiers(self):
"""Retourne la liste des familiers créés sur cette fiche.
ATTENTION : cette méthode retourne les familiers, pas les
PNJ. Un PNJ peut être créé sur le prototype de PNJ sans qu'il
apparaisse dans cette liste.
"""
familiers = list(importeur.familier.familiers.values())
familiers = [f for f in familiers if f.cle == self.cle]
return familiers
@property
def str_harnachements(self):
return ", ".join(sorted(self.harnachements))
@property
def str_stats_progres(self):
return ", ".join(sorted(self.stats_progres))
@property
def nom_achat(self):
return self.prototype and self.prototype.nom_singulier or "inconnu"
@property
def nom_singulier(self):
return self.prototype and self.prototype.nom_singulier or "inconnu"
@property
def nom_pluriel(self):
return self.prototype and self.prototype.nom_pluriel or "inconnus"
def get_nom(self, nombre=1):
"""Retourne le nom complet en fonction du nombre."""
if nombre == 0:
raise ValueError("Nombre invalide")
elif nombre == 1:
return self.nom_singulier
else:
return str(nombre) + " " + self.nom_pluriel
def acheter(self, quantite, magasin, transaction):
"""Achète les familiers dans la quantité spécifiée."""
salle = magasin.parent
acheteur = transaction.initiateur
i = 0
while i < quantite:
i += 1
pnj = importeur.pnj.creer_PNJ(self.prototype)
pnj.salle = salle
familier = importeur.familier.creer_familier(pnj)
familier.maitre = acheteur
familier.trouver_nom()
salle.envoyer("{} arrive.", pnj)
def regarder(self, personnage):
"""Le personnage regarde le familier avant achat."""
desc = self.prototype.description.regarder(personnage,
elt=self.prototype)
return desc
| bsd-3-clause | -3,066,334,919,988,521,500 | 35.868966 | 79 | 0.674336 | false | 3.507874 | false | false | false |
| fernandog/Medusa | lib/tmdbsimple/tv.py | 1 | 18419 |
# -*- coding: utf-8 -*-
"""
tmdbsimple.tv
~~~~~~~~~~~~~
This module implements the TV, TV Seasons, TV Episodes, and Networks
functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2018 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class TV(TMDB):
"""
TV functionality.
See: https://developers.themoviedb.org/3/tv
"""
BASE_PATH = 'tv'
URLS = {
'info': '/{id}',
'alternative_titles': '/{id}/alternative_titles',
'content_ratings': '/{id}/content_ratings',
'credits': '/{id}/credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'rating': '/{id}/rating',
'similar': '/{id}/similar',
'recommendations': '/{id}/recommendations',
'translations': '/{id}/translations',
'videos': '/{id}/videos',
'changes': '/{id}/changes',
'latest': '/latest',
'on_the_air': '/on_the_air',
'airing_today': '/airing_today',
'top_rated': '/top_rated',
'popular': '/popular',
}
def __init__(self, id=0):
super(TV, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the primary information about a TV series by id.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def alternative_titles(self, **kwargs):
"""
Get the alternative titles for a specific tv id.
Args:
language: (optional) ISO 3166-1 code.
append_to_response: (optional) Comma separated, any tv method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('alternative_titles')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def content_ratings(self, **kwargs):
"""
Get the content ratings for a TV Series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('content_ratings')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew information about a TV series. Just like the
website, we pull this information from the last season of the series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV series.
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters and backdrops) for a TV series.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
ISO 639-1.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV show. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def similar(self, **kwargs):
"""
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def recommendations(self, **kwargs):
"""
Get the recommendations for TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def translations(self, **kwargs):
"""
Get the list of translations that exist for a TV series. These
translations cascade down to the episode level.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('translations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV series (trailers, opening
credits, etc...).
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific series id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The
maximum number of days that can be returned in a single request is 14.
The language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the most newly created TV show. This is a live response
and will continuously change.
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def on_the_air(self, **kwargs):
"""
Get the list of TV shows that are currently on the air. This query
looks for any TV show that has an episode with an air date in the
next 7 days.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('on_the_air')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def airing_today(self, **kwargs):
"""
Get the list of TV shows that air today. Without a specified timezone,
this query defaults to EST (Eastern Time UTC-05:00).
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
timezone: (optional) Valid value from the list of timezones.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('airing_today')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def top_rated(self, **kwargs):
"""
Get the list of top rated TV shows. By default, this list will only
include TV shows that have 2 or more votes. This list refreshes every
day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('top_rated')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular TV shows. This list refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Seasons(TMDB):
"""
TV Seasons functionality.
See: https://developers.themoviedb.org/3/tv-seasons
"""
BASE_PATH = 'tv/{series_id}/season/{season_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'videos': '/videos',
}
def __init__(self, series_id, season_number):
super(TV_Seasons, self).__init__()
self.series_id = series_id
self.season_number = season_number
def info(self, **kwargs):
"""
Get the primary information about a TV season by its season number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew credits for a TV season by season number.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters) that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
ISO 639-1.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV season (trailers, teasers,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Episodes(TMDB):
"""
TV Episodes functionality.
See: https://developers.themoviedb.org/3/tv-episodes
"""
BASE_PATH = 'tv/{series_id}/season/{season_number}/episode/{episode_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'rating': '/rating',
'videos': '/videos',
}
def __init__(self, series_id, season_number, episode_number):
super(TV_Episodes, self).__init__()
self.series_id = series_id
self.season_number = season_number
self.episode_number = episode_number
def info(self, **kwargs):
"""
Get the primary information about a TV episode by combination of a
season and episode number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the TV episode credits by combination of season and episode number.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a TV episode by combination of a season and
episode number.
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path(
'external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (episode stills) for a TV episode by combination of a
season and episode number. Since episode stills don't have a language,
this call will always return all images.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV episode. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV episode (teasers, clips,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Networks(TMDB):
"""
Networks functionality.
See: https://developers.themoviedb.org/3/networks
"""
BASE_PATH = 'network'
URLS = {
'info': '/{id}',
}
def __init__(self, id):
super(Networks, self).__init__()
self.id = id
def info(self, **kwargs):
"""
This method is used to retrieve the basic information about a TV
network. You can use this ID to search for TV shows with the discover.
At this time we don't have much but this will be fleshed out over time.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
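# --- Editor's sketch, not part of the original module ---
# Typical use goes through the tmdbsimple package, which keeps the API key on
# the module; the key and series id below are placeholders.
#
#     import tmdbsimple as tmdb
#     tmdb.API_KEY = 'YOUR_TMDB_API_KEY'
#     show = tmdb.TV(1396)
#     info = show.info()
#     print(info['name'], info['number_of_seasons'])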
| gpl-3.0 | -6,584,255,332,790,810,000 | 28.708065 | 80 | 0.569846 | false | 4.263657 | false | false | false |
| oskgeek/liftpass | config.py | 1 | 2493 |
import sys
import os
# ------------------------------------------------------------------------------
# User Authentication - Liftpass is a single user application (for now)
# ------------------------------------------------------------------------------
UserKey = b'd759214482924d10ac159b794e9424e7'
UserSecret = b'4bf5d2c68e444ecab4d50adf8590544c'
# ------------------------------------------------------------------------------
# Paths
# ------------------------------------------------------------------------------
BasePath = os.path.abspath('./')
sys.path.append(BasePath)
# Where application data is stored
DataPath = os.path.join(BasePath,'data/')
# ------------------------------------------------------------------------------
# Analytics Storage - Where SDK updates are stored before being processed
# ------------------------------------------------------------------------------
AnalyticsStorage = {
'engine': 'core.storage.filesystem',
'path': os.path.join(DataPath, 'analytics/'),
'update': 600,
}
# ------------------------------------------------------------------------------
# Content Database - Where application content and settings are stored
# ------------------------------------------------------------------------------
ContentDatabase = {
'address': 'sqlite:///%s/content.db'%DataPath,
'debug': False
}
# ------------------------------------------------------------------------------
# Pricing Engine - Where data for prices are stored
# ------------------------------------------------------------------------------
PricingStorage = {
'engine': 'core.storage.filesystem',
'path': os.path.join(DataPath, 'prices/')
}
# ------------------------------------------------------------------------------
# Monitoring - records server activity and performance (not yet supported)
# ------------------------------------------------------------------------------
MonitorEngine = None
# ------------------------------------------------------------------------------
# Debug Terminal - caches user updates for debuging
# ------------------------------------------------------------------------------
DashboardTerminal = {
'engine': 'core.terminal.local',
'path': os.path.join(DataPath, 'terminal/')
}
# ------------------------------------------------------------------------------
# API Interface Service
# ------------------------------------------------------------------------------
APIServer = {
'address': '127.0.0.1',
'port': 9090,
'cors': True
}
| apache-2.0 | -6,309,304,441,272,448,000 | 37.953125 | 80 | 0.349779 | false | 5.455142 | false | false | false |
| Paulloz/godot | glsl_builders.py | 3 | 7347 |
"""Functions used to generate source files during build time
All such functions are invoked in a subprocess on Windows to prevent build flakiness.
"""
from platform_methods import subprocess_main
class RDHeaderStruct:
def __init__(self):
self.vertex_lines = []
self.fragment_lines = []
self.compute_lines = []
self.vertex_included_files = []
self.fragment_included_files = []
self.compute_included_files = []
self.reading = ""
self.line_offset = 0
self.vertex_offset = 0
self.fragment_offset = 0
self.compute_offset = 0
def include_file_in_rd_header(filename, header_data, depth):
fs = open(filename, "r")
line = fs.readline()
while line:
if line.find("#[vertex]") != -1:
header_data.reading = "vertex"
line = fs.readline()
header_data.line_offset += 1
header_data.vertex_offset = header_data.line_offset
continue
if line.find("#[fragment]") != -1:
header_data.reading = "fragment"
line = fs.readline()
header_data.line_offset += 1
header_data.fragment_offset = header_data.line_offset
continue
if line.find("#[compute]") != -1:
header_data.reading = "compute"
line = fs.readline()
header_data.line_offset += 1
header_data.compute_offset = header_data.line_offset
continue
while line.find("#include ") != -1:
includeline = line.replace("#include ", "").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
if not included_file in header_data.vertex_included_files and header_data.reading == "vertex":
header_data.vertex_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
elif not included_file in header_data.fragment_included_files and header_data.reading == "fragment":
header_data.fragment_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
elif not included_file in header_data.compute_included_files and header_data.reading == "compute":
header_data.compute_included_files += [included_file]
if include_file_in_rd_header(included_file, header_data, depth + 1) is None:
print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
line = fs.readline()
line = line.replace("\r", "")
line = line.replace("\n", "")
if header_data.reading == "vertex":
header_data.vertex_lines += [line]
if header_data.reading == "fragment":
header_data.fragment_lines += [line]
if header_data.reading == "compute":
header_data.compute_lines += [line]
line = fs.readline()
header_data.line_offset += 1
fs.close()
return header_data
def build_rd_header(filename):
header_data = RDHeaderStruct()
include_file_in_rd_header(filename, header_data, 0)
out_file = filename + ".gen.h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1 :]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1 :]
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "_RD\n")
fd.write("#define " + out_file_ifdef + "_RD\n")
out_file_class = out_file_base.replace(".glsl.gen.h", "").title().replace("_", "").replace(".", "") + "ShaderRD"
fd.write("\n")
fd.write('#include "servers/rendering/rasterizer_rd/shader_rd.h"\n\n')
fd.write("class " + out_file_class + " : public ShaderRD {\n\n")
fd.write("public:\n\n")
fd.write("\t" + out_file_class + "() {\n\n")
if len(header_data.compute_lines):
fd.write("\t\tstatic const char _compute_code[] = {\n")
for x in header_data.compute_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write('\t\tsetup(nullptr, nullptr, _compute_code, "' + out_file_class + '");\n')
fd.write("\t}\n")
else:
fd.write("\t\tstatic const char _vertex_code[] = {\n")
for x in header_data.vertex_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write("\t\tstatic const char _fragment_code[]={\n")
for x in header_data.fragment_lines:
for c in x:
fd.write(str(ord(c)) + ",")
fd.write(str(ord("\n")) + ",")
fd.write("\t\t0};\n\n")
fd.write('\t\tsetup(_vertex_code, _fragment_code, nullptr, "' + out_file_class + '");\n')
fd.write("\t}\n")
fd.write("};\n\n")
fd.write("#endif\n")
fd.close()
def build_rd_headers(target, source, env):
for x in source:
build_rd_header(str(x))
class RAWHeaderStruct:
def __init__(self):
self.code = ""
def include_file_in_raw_header(filename, header_data, depth):
fs = open(filename, "r")
line = fs.readline()
text = ""
while line:
while line.find("#include ") != -1:
includeline = line.replace("#include ", "").strip()[1:-1]
import os.path
included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
include_file_in_raw_header(included_file, header_data, depth + 1)
line = fs.readline()
header_data.code += line
line = fs.readline()
fs.close()
def build_raw_header(filename):
header_data = RAWHeaderStruct()
include_file_in_raw_header(filename, header_data, 0)
out_file = filename + ".gen.h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file.replace(".glsl.gen.h", "_shader_glsl")
out_file_base = out_file_base[out_file_base.rfind("/") + 1 :]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1 :]
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + "_RAW_H\n")
fd.write("#define " + out_file_ifdef + "_RAW_H\n")
fd.write("\n")
fd.write("static const char " + out_file_base + "[] = {\n")
for c in header_data.code:
fd.write(str(ord(c)) + ",")
fd.write("\t\t0};\n\n")
fd.write("#endif\n")
fd.close()
def build_rd_headers(target, source, env):
for x in source:
build_rd_header(str(x))
def build_raw_headers(target, source, env):
for x in source:
build_raw_header(str(x))
if __name__ == "__main__":
subprocess_main(globals())
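# --- Editor's sketch, not part of the original file ---
# Called directly, the builders turn a .glsl source into a generated header
# next to it; the path below is hypothetical.
#
#     build_rd_header('path/to/example.glsl')   # writes path/to/example.glsl.gen.h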
| mit | -8,014,658,679,955,484,000 | 31.653333 | 116 | 0.559412 | false | 3.452538 | false | false | false |
| lionaneesh/sugarlabs-calculate | toolbars.py | 1 | 14126 |
# -*- coding: utf-8 -*-
# toolbars.py, see CalcActivity.py for info
import pygtk
pygtk.require('2.0')
import gtk
from mathlib import MathLib
from sugar.graphics.palette import Palette
from sugar.graphics.menuitem import MenuItem
from sugar.graphics.toolbutton import ToolButton
from sugar.graphics.toggletoolbutton import ToggleToolButton
from sugar.graphics.style import GRID_CELL_SIZE
import logging
_logger = logging.getLogger('calc-activity')
from gettext import gettext as _
def _icon_exists(name):
if name == '':
return False
theme = gtk.icon_theme_get_default()
info = theme.lookup_icon(name, 0, 0)
if info:
return True
return False
class IconToolButton(ToolButton):
def __init__(self, icon_name, text, cb, help_cb=None, alt_html=''):
ToolButton.__init__(self)
if _icon_exists(icon_name):
self.set_icon(icon_name)
else:
if alt_html == '':
alt_html = icon_name
label = gtk.Label()
label.set_markup(alt_html)
label.show()
self.set_label_widget(label)
self.create_palette(text, help_cb)
self.connect('clicked', cb)
def create_palette(self, text, help_cb):
p = Palette(text)
if help_cb is not None:
item = MenuItem(_('Help'), 'action-help')
item.connect('activate', help_cb)
item.show()
p.menu.append(item)
self.set_palette(p)
class IconToggleToolButton(ToggleToolButton):
def __init__(self, items, cb, desc):
ToggleToolButton.__init__(self)
self.items = items
if 'icon' in items[0] and _icon_exists(items[0]['icon']):
self.set_named_icon(items[0]['icon'])
elif 'html' in items[0]:
self.set_label(items[0]['html'])
# self.set_tooltip(items[0][1])
self.set_tooltip(desc)
self.selected = 0
self.connect('clicked', self.toggle_button)
self.callback = cb
def toggle_button(self, w):
self.selected = (self.selected + 1) % len(self.items)
but = self.items[self.selected]
if 'icon' in but and _icon_exists(but['icon']):
self.set_named_icon(but['icon'])
elif 'html' in but:
_logger.info('Setting html: %s', but['html'])
self.set_label(but['html'])
# self.set_tooltip(but[1])
if self.callback is not None:
if 'html' in but:
self.callback(but['html'])
else:
self.callback(but)
class TextToggleToolButton(gtk.ToggleToolButton):
def __init__(self, items, cb, desc, index=False):
gtk.ToggleToolButton.__init__(self)
self.items = items
self.set_label(items[0])
self.selected = 0
self.connect('clicked', self.toggle_button)
self.callback = cb
self.index = index
self.set_tooltip_text(desc)
def toggle_button(self, w):
self.selected = (self.selected + 1) % len(self.items)
but = self.items[self.selected]
self.set_label(but)
if self.callback is not None:
if self.index:
self.callback(self.selected)
else:
self.callback(but)
class LineSeparator(gtk.SeparatorToolItem):
def __init__(self):
gtk.SeparatorToolItem.__init__(self)
self.set_draw(True)
class EditToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
copy_tool = ToolButton('edit-copy')
copy_tool.set_tooltip(_('Copy'))
copy_tool.set_accelerator(_('<ctrl>c'))
copy_tool.connect('clicked', lambda x: calc.text_copy())
self.insert(copy_tool, -1)
menu_item = MenuItem(_('Cut'))
try:
menu_item.set_accelerator(_('<ctrl>x'))
except AttributeError:
pass
menu_item.connect('activate', lambda x: calc.text_cut())
menu_item.show()
copy_tool.get_palette().menu.append(menu_item)
self.insert(IconToolButton('edit-paste', _('Paste'),
lambda x: calc.text_paste(),
alt_html='Paste'), -1)
self.show_all()
class AlgebraToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('algebra-square', _('Square'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '**2'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(square)'),
alt_html='x<sup>2</sup>'), -1)
self.insert(IconToolButton('algebra-sqrt', _('Square root'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sqrt'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sqrt)'),
alt_html='√x'), -1)
self.insert(IconToolButton('algebra-xinv', _('Inverse'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '**-1'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(inv)'),
alt_html='x<sup>-1</sup>'), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('algebra-exp', _('e to the power x'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'exp'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(exp)'),
alt_html='e<sup>x</sup>'), -1)
self.insert(IconToolButton('algebra-xpowy', _('x to the power y'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'pow'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(pow)'),
alt_html='x<sup>y</sup>'), -1)
self.insert(IconToolButton('algebra-ln', _('Natural logarithm'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'ln'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(ln)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('algebra-fac', _('Factorial'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'fac'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(fac)')), -1)
self.show_all()
class TrigonometryToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('trigonometry-sin', _('Sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sin'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sin)')), -1)
self.insert(IconToolButton('trigonometry-cos', _('Cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'cos'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(cos)')), -1)
self.insert(IconToolButton('trigonometry-tan', _('Tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'tan'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(tan)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('trigonometry-asin', _('Arc sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'asin'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(asin)')), -1)
self.insert(IconToolButton('trigonometry-acos', _('Arc cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'acos'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(acos)')), -1)
self.insert(IconToolButton('trigonometry-atan', _('Arc tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'atan'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(atan)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('trigonometry-sinh', _('Hyperbolic sine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'sinh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(sinh)')), -1)
self.insert(IconToolButton('trigonometry-cosh', _('Hyperbolic cosine'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'cosh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(cosh)')), -1)
self.insert(IconToolButton('trigonometry-tanh', _('Hyperbolic tangent'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'tanh'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(tanh)')), -1)
self.show_all()
class BooleanToolbar(gtk.Toolbar):
def __init__(self, calc):
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('boolean-and', _('Logical and'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '&'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(And)')), -1)
self.insert(IconToolButton('boolean-or', _('Logical or'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '|'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(Or)')), -1)
# self.insert(IconToolButton('boolean-xor', _('Logical xor'),
# lambda x: calc.button_pressed(calc.TYPE_OP_POST, '^'),
# lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(xor)')), -1)
self.insert(LineSeparator(), -1)
self.insert(IconToolButton('boolean-eq', _('Equals'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '==')), -1)
self.insert(IconToolButton('boolean-neq', _('Not equals'),
lambda x: calc.button_pressed(calc.TYPE_OP_POST, '!=')), -1)
self.show_all()
class MiscToolbar(gtk.Toolbar):
def __init__(self, calc, target_toolbar=None):
self._target_toolbar = target_toolbar
gtk.Toolbar.__init__(self)
self.insert(IconToolButton('constants-pi', _('Pi'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'pi'),
alt_html='π'), -1)
self.insert(IconToolButton('constants-e', _('e'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'e')), -1)
self.insert(IconToolButton('constants-eulersconstant', _('γ'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, '0.577215664901533')), -1)
self.insert(IconToolButton('constants-goldenratio', _('φ'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, '1.618033988749895')), -1)
self._line_separator1 = LineSeparator()
self._line_separator2 = LineSeparator()
self._plot_button = IconToolButton('plot', _('Plot'),
lambda x: calc.button_pressed(calc.TYPE_FUNCTION, 'plot'),
lambda x: calc.button_pressed(calc.TYPE_TEXT, 'help(plot)'))
el = [
{'icon': 'format-deg', 'desc': _('Degrees'), 'html': 'deg'},
{'icon': 'format-rad', 'desc': _('Radians'), 'html': 'rad'},
]
self._angle_button = IconToggleToolButton(
el,
lambda x: self.update_angle_type(x, calc),
_('Degrees / Radians'))
self.update_angle_type('deg', calc)
el = [
{'icon': 'format-sci', 'html': 'sci'},
{'icon': 'format-exp', 'html': 'exp'},
]
self._format_button = IconToggleToolButton(
el,
lambda x: self.update_format_type(x, calc),
_('Exponent / Scientific notation'))
el = [
{'icon': 'digits-9', 'html': '9'},
{'icon': 'digits-12', 'html': '12'},
{'icon': 'digits-15', 'html': '15'},
{'icon': 'digits-6', 'html': '6'},
]
self._digits_button = IconToggleToolButton(
el,
lambda x: self.update_digits(x, calc),
_('Number of shown digits'))
el = [
{'icon': 'base-10', 'html': '10'},
{'icon': 'base-2', 'html': '2'},
{'icon': 'base-16', 'html': '16'},
{'icon': 'base-8', 'html': '8'}
]
self._base_button = IconToggleToolButton(
el,
lambda x: self.update_int_base(x, calc),
_('Integer formatting base'))
self.update_layout()
self.show_all()
def update_layout(self):
if gtk.gdk.screen_width() < 14 * GRID_CELL_SIZE or \
self._target_toolbar is None:
target_toolbar = self
if self._target_toolbar is not None:
self._remove_buttons(self._target_toolbar)
else:
target_toolbar = self._target_toolbar
self._remove_buttons(self)
target_toolbar.insert(self._line_separator1, -1)
target_toolbar.insert(self._plot_button, -1)
target_toolbar.insert(self._line_separator2, -1)
target_toolbar.insert(self._angle_button, -1)
target_toolbar.insert(self._format_button, -1)
target_toolbar.insert(self._digits_button, -1)
target_toolbar.insert(self._base_button, -1)
def _remove_buttons(self, toolbar):
for item in [self._plot_button, self._line_separator1,
self._line_separator2, self._angle_button,
self._format_button, self._digits_button,
self._base_button]:
toolbar.remove(item)
def update_angle_type(self, text, calc):
var = calc.parser.get_var('angle_scaling')
if var is None:
_logger.warning('Variable angle_scaling not defined.')
return
if text == 'deg':
var.value = MathLib.ANGLE_DEG
elif text == 'rad':
var.value = MathLib.ANGLE_RAD
_logger.debug('Angle scaling: %s', var.value)
def update_format_type(self, text, calc):
if text == 'exp':
calc.ml.set_format_type(MathLib.FORMAT_EXPONENT)
elif text == 'sci':
calc.ml.set_format_type(MathLib.FORMAT_SCIENTIFIC)
_logger.debug('Format type: %s', calc.ml.format_type)
def update_digits(self, text, calc):
calc.ml.set_digit_limit(int(text))
_logger.debug('Digit limit: %s', calc.ml.digit_limit)
def update_int_base(self, text, calc):
calc.ml.set_integer_base(int(text))
_logger.debug('Integer base: %s', calc.ml.integer_base)
| gpl-2.0 | -3,756,317,717,864,053,000 | 35.677922 | 84 | 0.569436 | false | 3.579468 | false | false | false |
| babelsberg/babelsberg-r | tests/constraints/test_midpoint.py | 1 | 1511 |
import py
from ..base import BaseTopazTest
E = 0.00000000000001
class TestConstraintVariableObject(BaseTopazTest):
def execute(self, space, code, *libs):
return [space.execute("""
require "%s"
%s
""" % (lib, code)) for lib in libs]
def test_midpoint(self, space):
w_res = space.execute("""
require "libcassowary"
res = []
class Point
def x; @x; end
def y; @y; end
def + q
Point.new(x+q.x,y+q.y)
end
def * n
Point.new(x*n, y*n)
end
def / n
Point.new(x/n, y/n)
end
def == o
o.x == self.x && o.y == self.y
end
def initialize(x, y)
@x = x
@y = y
end
end
class MidpointLine
attr_reader :end1, :end2, :midpoint
def initialize(pt1, pt2)
@end1 = pt1
@end2 = pt2
@midpoint = Point.new(0,0)
always { (end1 + end2) == (midpoint*2) }
end
def length
@end2.x - @end1.x
end
end
p1 = Point.new(0,10)
p2 = Point.new(20,30)
m = MidpointLine.new(p1,p2)
return p1.x, p1.y, p2.x, p2.y, m.midpoint.x, m.midpoint.y
""")
res = self.unwrap(space, w_res)
assert (res[0] + res[2]) / 2.0 == res[4]
assert (res[1] + res[3]) / 2.0 == res[5]
| bsd-3-clause | -4,466,466,756,293,176,000 | 21.552239 | 65 | 0.433488 | false | 3.270563 | false | false | false |
| LawrenceK/fs_monitor | fs_monitor.py | 1 | 5649 |
#!/usr/bin/python
# (C)opyright L.P.Klyne 2013
sw_topleft = 13
sw_topright = 7
sw_bottomleft = 12
sw_bottomright = 11
led_topleft = 22
led_topright = 18
led_bottomleft = 15
led_bottomright = 16
import logging
import os
import os.path
import time
import subprocess
import RPi.GPIO as GPIO
_log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(filename='example.log',level=logging.DEBUG)
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
# place holder for adding command line
class Config:
pass
config = Config()
config.rsync_script = "rsync_a_b.sh"
class led:
def __init__(self, channel):
self.channel = channel
GPIO.setup(self.channel, GPIO.OUT, initial=GPIO.HIGH)
self.pwm = GPIO.PWM(self.channel, 1)
self.pwm.start(100.0)
def flash(self, dc):
_log.debug("flash led %s", self.channel)
self.pwm.ChangeDutyCycle(dc)
def on(self):
_log.debug("led on %s", self.channel)
self.pwm.ChangeDutyCycle(0.0)
def off(self):
_log.debug("led off %s", self.channel)
self.pwm.ChangeDutyCycle(100.0)
def is_on(self):
return GPIO.input(self.channel)
class switch:
def __init__(self, channel):
self.channel = channel
self.actions = [] # callable taking self
GPIO.setup(self.channel, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def add_action(self, action):
_log.debug("switch %s add action %s", self.channel, action)
self.actions.append(action)
if len(self.actions) == 1:
GPIO.add_event_detect(
self.channel, GPIO.BOTH,
callback=lambda c: self.edge(), bouncetime=200)
def remove_action(self, action):
    self.actions.remove(action)
    if len(self.actions) == 0:
        GPIO.remove_event_detect(self.channel)
def edge(self):
if self.is_on():
for a in self.actions:
_log.info("switch trigger %s action %s", self.channel, a)
a(self)
def is_on(self):
return not GPIO.input(self.channel) # pulled up
class disk:
# States:
# NotExist
# ExistUnmounted
# ExistsMounted
def __init__(self, name, mount, led):
self.devicename = name
self.mountpoint = mount
self.managed = False # have we seen the device
self.led = led
def is_mounted(self):
return os.path.ismount(self.mountpoint)
def device_exists(self):
return os.path.exists(self.devicename)
def check_mount(self):
# the aim here is to mount the disk when plugged in but to leave
# unmounted when initiated by a switch and to mount when device
# unplugged and plugged in again. I tried using udev but that
# resulted in the disk mounting to early if plugged in at boot.
if self.device_exists():
if self.managed:
# it is either already mounted or being unmounted
pass
else:
_log.info("Disk added %s", self.devicename)
if self.is_mounted():
self.led.on()
else:
self.do_mount()
self.managed = True
return True
elif self.managed:
_log.info("Disk removed %s", self.devicename)
self.managed = False
return False
def do_mount(self):
self.led.flash(10)
_log.info("Mounting %s on %s", self.devicename, self.mountpoint)
subprocess.check_call(["mount", self.mountpoint])
self.led.on()
return True
def do_unmount(self):
if self.is_mounted():
self.led.flash(50)
_log.info("Un Mounting %s from %s", self.devicename, self.mountpoint)
subprocess.check_call(["umount", self.mountpoint])
self.led.off()
return True
leds = [
led(led_topleft),
led(led_topright),
led(led_bottomleft),
led(led_bottomright)
]
switches = [
switch(sw_topleft), # Shutdown
switch(sw_topright), # RSync
switch(sw_bottomleft), # unmount sda1/diskA
switch(sw_bottomright) # unmount sdb1/diskB
]
disks = [
disk('/dev/sda1', '/mnt/diskA', leds[2]),
disk('/dev/sdb1', '/mnt/diskB', leds[3]),
]
rsync_p = None
def do_rsync(script):
global rsync_p
if rsync_p is None and disks[0].is_mounted() and disks[1].is_mounted():
scriptfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), script)
_log.info("Rsync %s to/from %s using %s",
os.path.join(disks[1].mountpoint, "*"),
disks[0].mountpoint,
scriptfile)
leds[1].flash(50)
rsync_p = subprocess.Popen(scriptfile, shell=True)
def do_shutdown():
_log.info("Halt fileserver")
subprocess.check_call(["halt"])
def main():
global rsync_p
leds[0].on()
try:
_log.info("Startup fileserver monitor")
switches[0].add_action(lambda s: do_shutdown())
switches[1].add_action(lambda s: do_rsync(config.rsync_script))
switches[2].add_action(lambda s: disks[0].do_unmount())
switches[3].add_action(lambda s: disks[1].do_unmount())
while(True):
time.sleep(2.0)
if rsync_p is None:
if any([d.check_mount() for d in disks]):
do_rsync(config.rsync_script)
elif rsync_p.poll() is not None: # has rsync completed
rsync_p.returncode
rsync_p = None
leds[1].off()
finally:
leds[0].off()
GPIO.cleanup()
main()
| bsd-3-clause | -5,335,742,982,079,361,000 | 26.691176 | 86 | 0.582404 | false | 3.489191 | true | false | false |
| Undo1/Torch | script.py | 1 | 1147 |
from BeautifulSoup import BeautifulSoup
import mysql.connector
import config #Where we keep our passwords and stuff
import tldextract
import itertools
cnx = mysql.connector.connect(user=config.MySQLUsername(), password=config.MySQLPassword(), host=config.MySQLHost(), database=config.MySQLDatabase())
cursor = cnx.cursor()
query = ("SELECT Body, Score FROM Posts WHERE PostTypeId=2")
cursor.execute(query)
sites = []
for (Body, Score) in cursor:
linksInAnswer = []
soup = BeautifulSoup(Body)
for link in soup.findAll('a'):
extract = tldextract.extract(link.get('href'))
# print extract
if len(extract.subdomain) > 0:
site = extract.subdomain + '.' + extract.domain + '.' + extract.suffix
else:
site = extract.domain + '.' + extract.suffix
site = link.get('href')
linksInAnswer.append(site)
linksInAnswer = set(linksInAnswer)
sites.extend(linksInAnswer)
groupedsites = [list(g) for k, g in itertools.groupby(sorted(sites))]
groupedsites = sorted(groupedsites, key=len, reverse=True)
for sitegroup in groupedsites:
if len(sitegroup) > 3: print str(len(sitegroup)) + " x " + sitegroup[0]
cursor.close()
cnx.close()
| mit | -2,881,982,992,902,040,000 | 27 | 149 | 0.72973 | false | 3.305476 | false | false | false |
| mistercrunch/panoramix | superset/databases/api.py | 1 | 34612 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any, Dict, List, Optional
from zipfile import ZipFile
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from marshmallow import ValidationError
from sqlalchemy.exc import NoSuchTableError, OperationalError, SQLAlchemyError
from superset import app, event_logger
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.commands.create import CreateDatabaseCommand
from superset.databases.commands.delete import DeleteDatabaseCommand
from superset.databases.commands.exceptions import (
DatabaseConnectionFailedError,
DatabaseCreateFailedError,
DatabaseDeleteDatasetsExistFailedError,
DatabaseDeleteFailedError,
DatabaseInvalidError,
DatabaseNotFoundError,
DatabaseUpdateFailedError,
)
from superset.databases.commands.export import ExportDatabasesCommand
from superset.databases.commands.importers.dispatcher import ImportDatabasesCommand
from superset.databases.commands.test_connection import TestConnectionDatabaseCommand
from superset.databases.commands.update import UpdateDatabaseCommand
from superset.databases.commands.validate import ValidateDatabaseParametersCommand
from superset.databases.dao import DatabaseDAO
from superset.databases.decorators import check_datasource_access
from superset.databases.filters import DatabaseFilter
from superset.databases.schemas import (
database_schemas_query_schema,
DatabaseFunctionNamesResponse,
DatabasePostSchema,
DatabasePutSchema,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
get_export_ids_schema,
SchemasResponseSchema,
SelectStarResponseSchema,
TableMetadataResponseSchema,
)
from superset.databases.utils import get_table_metadata
from superset.db_engine_specs import get_available_engine_specs
from superset.exceptions import InvalidPayloadFormatError, InvalidPayloadSchemaError
from superset.extensions import security_manager
from superset.models.core import Database
from superset.typing import FlaskResponse
from superset.utils.core import error_msg_from_exception
from superset.views.base_api import BaseSupersetModelRestApi, statsd_metrics
logger = logging.getLogger(__name__)
class DatabaseRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(Database)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
"table_metadata",
"select_star",
"schemas",
"test_connection",
"related_objects",
"function_names",
"available",
"validate_parameters",
}
resource_name = "database"
class_permission_name = "Database"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
allow_browser_login = True
base_filters = [["id", DatabaseFilter, lambda: []]]
show_columns = [
"id",
"database_name",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"configuration_method",
"allow_ctas",
"allow_cvas",
"allow_dml",
"backend",
"force_ctas_schema",
"allow_multi_schema_metadata_fetch",
"impersonate_user",
"encrypted_extra",
"extra",
"parameters",
"server_cert",
"sqlalchemy_uri",
]
list_columns = [
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_cost_estimate",
"allows_subquery",
"allows_virtual_table_explore",
"backend",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.last_name",
"database_name",
"explore_database_id",
"expose_in_sqllab",
"force_ctas_schema",
"id",
]
add_columns = [
"database_name",
"sqlalchemy_uri",
"cache_timeout",
"expose_in_sqllab",
"allow_run_async",
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"configuration_method",
"force_ctas_schema",
"impersonate_user",
"allow_multi_schema_metadata_fetch",
"extra",
"encrypted_extra",
"server_cert",
]
edit_columns = add_columns
list_select_columns = list_columns + ["extra", "sqlalchemy_uri", "password"]
order_columns = [
"allow_csv_upload",
"allow_dml",
"allow_run_async",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"database_name",
"expose_in_sqllab",
]
# Removes the local limit for the page size
max_page_size = -1
add_model_schema = DatabasePostSchema()
edit_model_schema = DatabasePutSchema()
apispec_parameter_schemas = {
"database_schemas_query_schema": database_schemas_query_schema,
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_tag = "Database"
openapi_spec_component_schemas = (
DatabaseFunctionNamesResponse,
DatabaseRelatedObjectsResponse,
DatabaseTestConnectionSchema,
DatabaseValidateParametersSchema,
TableMetadataResponseSchema,
SelectStarResponseSchema,
SchemasResponseSchema,
)
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
def post(self) -> Response:
"""Creates a new Database
---
post:
description: >-
Create a new Database.
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Database added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
302:
description: Redirects to the current digest
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDatabaseCommand(g.user, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = new_model.sqlalchemy_uri
return self.response(201, id=new_model.id, result=item)
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
def put( # pylint: disable=too-many-return-statements, arguments-differ
self, pk: int
) -> Response:
"""Changes a Database
---
put:
description: >-
Changes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Database changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDatabaseCommand(g.user, pk, item).run()
# Return censored version for sqlalchemy URI
item["sqlalchemy_uri"] = changed_model.sqlalchemy_uri
return self.response(200, id=changed_model.id, result=item)
except DatabaseNotFoundError:
return self.response_404()
except DatabaseInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatabaseConnectionFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response: # pylint: disable=arguments-differ
"""Deletes a Database
---
delete:
description: >-
Deletes a Database.
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Database deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDatabaseCommand(g.user, pk).run()
return self.response(200, message="OK")
except DatabaseNotFoundError:
return self.response_404()
except DatabaseDeleteDatasetsExistFailedError as ex:
return self.response_422(message=str(ex))
except DatabaseDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>/schemas/")
@protect()
@safe
@rison(database_schemas_query_schema)
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".schemas",
log_to_statsd=False,
)
def schemas(self, pk: int, **kwargs: Any) -> FlaskResponse:
"""Get all schemas from a database
---
get:
description: Get all schemas from a database
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/database_schemas_query_schema'
responses:
200:
description: A List of all schemas from the database
content:
application/json:
schema:
$ref: "#/components/schemas/SchemasResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = self.datamodel.get(pk, self._base_filters)
if not database:
return self.response_404()
try:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=kwargs["rison"].get("force", False),
)
schemas = security_manager.get_schemas_accessible_by_user(database, schemas)
return self.response(200, result=schemas)
except OperationalError:
return self.response(
500, message="There was an error connecting to the database"
)
@expose("/<int:pk>/table/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".table_metadata",
log_to_statsd=False,
)
def table_metadata(
self, database: Database, table_name: str, schema_name: str
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database table metadata
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: Table metadata information
content:
application/json:
schema:
$ref: "#/components/schemas/TableMetadataResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.table_metadata.__name__)
try:
table_info = get_table_metadata(database, table_name, schema_name)
except SQLAlchemyError as ex:
self.incr_stats("error", self.table_metadata.__name__)
return self.response_422(error_msg_from_exception(ex))
self.incr_stats("success", self.table_metadata.__name__)
return self.response(200, **table_info)
@expose("/<int:pk>/select_star/<table_name>/", methods=["GET"])
@expose("/<int:pk>/select_star/<table_name>/<schema_name>/", methods=["GET"])
@protect()
@check_datasource_access
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.select_star",
log_to_statsd=False,
)
def select_star(
self, database: Database, table_name: str, schema_name: Optional[str] = None
) -> FlaskResponse:
"""Table schema info
---
get:
description: Get database select star for table
parameters:
- in: path
schema:
type: integer
name: pk
description: The database id
- in: path
schema:
type: string
name: table_name
description: Table name
- in: path
schema:
type: string
name: schema_name
description: Table schema
responses:
200:
description: SQL statement for a select star for table
content:
application/json:
schema:
$ref: "#/components/schemas/SelectStarResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self.incr_stats("init", self.select_star.__name__)
try:
result = database.select_star(
table_name, schema_name, latest_partition=True, show_cols=True
)
except NoSuchTableError:
self.incr_stats("error", self.select_star.__name__)
return self.response(404, message="Table not found on the database")
self.incr_stats("success", self.select_star.__name__)
return self.response(200, result=result)
@expose("/test_connection", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".test_connection",
log_to_statsd=False,
)
def test_connection( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""Tests a database connection
---
post:
description: >-
Tests a database connection
requestBody:
description: Database schema
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseTestConnectionSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = DatabaseTestConnectionSchema().load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
TestConnectionDatabaseCommand(g.user, item).run()
return self.response(200, message="OK")
@expose("/<int:pk>/related_objects/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".related_objects",
log_to_statsd=False,
)
def related_objects(self, pk: int) -> Response:
"""Get charts and dashboards count associated to a database
---
get:
description:
Get charts and dashboards count associated to a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseRelatedObjectsResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
data = DatabaseDAO.get_related_objects(pk)
charts = [
{
"id": chart.id,
"slice_name": chart.slice_name,
"viz_type": chart.viz_type,
}
for chart in data["charts"]
]
dashboards = [
{
"id": dashboard.id,
"json_metadata": dashboard.json_metadata,
"slug": dashboard.slug,
"title": dashboard.dashboard_title,
}
for dashboard in data["dashboards"]
]
return self.response(
200,
charts={"count": len(charts), "result": charts},
dashboards={"count": len(dashboards), "result": dashboards},
)
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self, **kwargs: Any) -> Response:
"""Export database(s) with associated datasets
---
get:
description: Download database(s) and associated dataset(s) as a zip file
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: A zip file with database(s) and dataset(s) as YAML
content:
application/zip:
schema:
type: string
format: binary
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"database_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
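        # Write each exported YAML file into an in-memory zip bundle named after the export timestamp.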
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportDatabasesCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except DatabaseNotFoundError:
return self.response_404()
buf.seek(0)
return send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
def import_(self) -> Response:
"""Import database(s) with associated datasets
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP)
type: string
format: binary
passwords:
description: JSON map of passwords for each file
type: string
overwrite:
description: overwrite existing databases?
type: boolean
responses:
200:
description: Database import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
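        # Unpack the uploaded zip in memory and collect its YAML contents for import.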
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDatabasesCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
@expose("/<int:pk>/function_names/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".function_names",
log_to_statsd=False,
)
def function_names(self, pk: int) -> Response:
"""Get function names supported by a database
---
get:
description:
Get function names supported by a database
parameters:
- in: path
name: pk
schema:
type: integer
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseFunctionNamesResponse"
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
database = DatabaseDAO.find_by_id(pk)
if not database:
return self.response_404()
return self.response(200, function_names=database.function_names,)
@expose("/available/", methods=["GET"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".available",
log_to_statsd=False,
)
def available(self) -> Response:
"""Return names of databases currently available
---
get:
description:
Get names of databases currently available
responses:
200:
description: Database names
content:
application/json:
schema:
type: array
items:
type: object
properties:
name:
description: Name of the database
type: string
engine:
description: Name of the SQLAlchemy engine
type: string
available_drivers:
description: Installed drivers for the engine
type: array
items:
type: string
default_driver:
description: Default driver for the engine
type: string
preferred:
description: Is the database preferred?
type: boolean
sqlalchemy_uri_placeholder:
description: Example placeholder for the SQLAlchemy URI
type: string
parameters:
description: JSON schema defining the needed parameters
type: object
400:
$ref: '#/components/responses/400'
500:
$ref: '#/components/responses/500'
"""
preferred_databases: List[str] = app.config.get("PREFERRED_DATABASES", [])
available_databases = []
for engine_spec, drivers in get_available_engine_specs().items():
payload: Dict[str, Any] = {
"name": engine_spec.engine_name,
"engine": engine_spec.engine,
"available_drivers": sorted(drivers),
"preferred": engine_spec.engine_name in preferred_databases,
}
if hasattr(engine_spec, "default_driver"):
payload["default_driver"] = engine_spec.default_driver # type: ignore
# show configuration parameters for DBs that support it
if (
hasattr(engine_spec, "parameters_json_schema")
and hasattr(engine_spec, "sqlalchemy_uri_placeholder")
and getattr(engine_spec, "default_driver") in drivers
):
payload[
"parameters"
] = engine_spec.parameters_json_schema() # type: ignore
payload[
"sqlalchemy_uri_placeholder"
] = engine_spec.sqlalchemy_uri_placeholder # type: ignore
available_databases.append(payload)
# sort preferred first
response = sorted(
(payload for payload in available_databases if payload["preferred"]),
key=lambda payload: preferred_databases.index(payload["name"]),
)
# add others
response.extend(
sorted(
(
payload
for payload in available_databases
if not payload["preferred"]
),
key=lambda payload: payload["name"],
)
)
return self.response(200, databases=response)
@expose("/validate_parameters", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".validate_parameters",
log_to_statsd=False,
)
def validate_parameters( # pylint: disable=too-many-return-statements
self,
) -> FlaskResponse:
"""validates database connection parameters
---
post:
description: >-
Validates parameters used to connect to a database
requestBody:
description: DB-specific parameters
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DatabaseValidateParametersSchema"
responses:
200:
description: Database Test Connection
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
raise InvalidPayloadFormatError("Request is not JSON")
try:
payload = DatabaseValidateParametersSchema().load(request.json)
except ValidationError as error:
raise InvalidPayloadSchemaError(error)
command = ValidateDatabaseParametersCommand(g.user, payload)
command.run()
return self.response(200, message="OK")
|
apache-2.0
| -2,846,362,746,387,505,000
| 33.405567
| 88
| 0.53649
| false
| 4.681093
| true
| false
| false
|
jonathf/chaospy
|
chaospy/distributions/collection/wald.py
|
1
|
2480
|
"""Wald distribution."""
import numpy
from scipy import special
import chaospy
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class wald(SimpleDistribution):
"""Wald distribution."""
def __init__(self, mu):
super(wald, self).__init__(dict(mu=mu))
def _pdf(self, x, mu):
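        # The density is zero outside the positive support, so only evaluate it for x > 0.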
out = numpy.zeros(x.shape)
indices = x > 0
out[indices] = 1.0/numpy.sqrt(2*numpy.pi*x[indices])
out[indices] *= numpy.exp(-(1-mu*x[indices])**2.0 / (2*x[indices]*mu**2.0))
return out
def _cdf(self, x, mu):
trm1 = 1./mu-x
trm2 = 1./mu+x
isqx = numpy.full_like(x, numpy.inf)
indices = x > 0
isqx[indices] = 1./numpy.sqrt(x[indices])
out = 1.-special.ndtr(isqx*trm1)
out -= numpy.exp(2.0/mu)*special.ndtr(-isqx*trm2)
out = numpy.where(x == numpy.inf, 1, out)
out = numpy.where(x == -numpy.inf, 0, out)
return out
def _lower(self, mu):
return 0.
def _upper(self, mu):
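        # No closed-form upper bound: numerically invert the CDF at 1 - 1e-12 to get a
        # practical truncation point for each mu.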
qloc = numpy.repeat(1-1e-12, mu.size)
out = chaospy.approximate_inverse(
distribution=self,
idx=0,
qloc=qloc,
parameters=dict(mu=mu),
bounds=(0., 60+numpy.e**(1./(mu+0.1))),
tolerance=1e-15,
)
return out
class Wald(ShiftScaleDistribution):
"""
Wald distribution.
Reciprocal inverse Gaussian distribution.
Args:
mu (float, Distribution):
Mean of the normal distribution
scale (float, Distribution):
Scaling parameter
shift (float, Distribution):
Location parameter
Examples:
>>> distribution = chaospy.Wald(0.5)
>>> distribution
Wald(0.5)
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([ 0. , 1.416, 2.099, 2.94 , 4.287, 54.701])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0. , 0.297, 0.275, 0.2 , 0.105, 0. ])
>>> distribution.sample(4).round(3)
array([0.61 , 1.401, 1.274, 2.115])
"""
def __init__(self, mu=1, scale=1, shift=0):
super(Wald, self).__init__(
dist=wald(mu),
scale=scale,
shift=shift,
repr_args=[mu],
)
|
mit
| -2,990,161,795,545,608,000
| 26.865169
| 83
| 0.522177
| false
| 3.246073
| false
| false
| false
|
crchemist/scioncc
|
src/ion/util/preload.py
|
1
|
24501
|
#!/usr/bin/env python
"""Utility to bulk load resources into the system, e.g. for initial preload"""
__author__ = 'Michael Meisinger'
import yaml
import re
import os
from pyon.core import MSG_HEADER_ACTOR, MSG_HEADER_ROLES, MSG_HEADER_VALID
from pyon.core.bootstrap import get_service_registry
from pyon.core.governance import get_system_actor
from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id
from pyon.ion.resource import get_restype_lcsm
from pyon.public import CFG, log, BadRequest, Inconsistent, NotFound, IonObject, RT, OT, AS, LCS, named_any, get_safe, get_ion_ts, PRED
from ion.util.parse_utils import get_typed_value
# Well known action config keys
KEY_SCENARIO = "scenario"
KEY_ID = "id"
KEY_OWNER = "owner"
KEY_LCSTATE = "lcstate"
KEY_ORGS = "orgs"
# Well known aliases
ID_ORG_ION = "ORG_ION"
ID_SYSTEM_ACTOR = "USER_SYSTEM"
UUID_RE = '^[0-9a-fA-F]{32}$'
class Preloader(object):
def initialize_preloader(self, process, preload_cfg):
log.info("Initialize preloader")
self.process = process
self.preload_cfg = preload_cfg or {}
self._init_preload()
self.rr = self.process.container.resource_registry
self.bulk = self.preload_cfg.get("bulk", False) is True
# Loads internal bootstrapped resource ids that will be referenced during preload
self._load_system_ids()
# Load existing resources by preload ID
self._prepare_incremental()
def _init_preload(self):
self.obj_classes = {} # Cache of class for object types
self.object_definitions = None # Dict of preload rows before processing
self.resource_ids = {} # Holds a mapping of preload IDs to internal resource ids
self.resource_objs = {} # Holds a mapping of preload IDs to the actual resource objects
self.resource_assocs = {} # Holds a mapping of existing associations list by predicate
self.bulk_resources = {} # Keeps resource objects to be bulk inserted/updated
self.bulk_associations = {} # Keeps association objects to be bulk inserted/updated
self.bulk_existing = set() # This keeps the ids of the bulk objects to update instead of delete
def preload_master(self, filename, skip_steps=None):
"""Executes a preload master file"""
log.info("Preloading from master file: %s", filename)
with open(filename, "r") as f:
master_yml = f.read()
        master_cfg = yaml.safe_load(master_yml)
        if "preload_type" not in master_cfg or master_cfg["preload_type"] != "steps":
raise BadRequest("Invalid preload steps file")
for step in master_cfg["steps"]:
if skip_steps and step in skip_steps:
log.info("Skipping step %s" % step)
continue
step_filename = "%s/%s.yml" % (os.path.dirname(filename), step)
self._execute_step(step_filename)
def _execute_step(self, filename):
"""Executes a preload step file"""
with open(filename, "r") as f:
step_yml = f.read()
step_cfg = yaml.safe_load(step_yml)
if not "preload_type" in step_cfg or step_cfg["preload_type"] != "actions":
raise BadRequest("Invalid preload actions file")
for action in step_cfg["actions"]:
try:
self._execute_action(action)
except Exception as ex:
log.warn("Action failed: " + str(ex), exc_info=True)
self.commit_bulk()
def _execute_action(self, action):
"""Executes a preload action"""
action_type = action["action"]
#log.debug("Preload action %s id=%s", action_type, action.get("id", ""))
scope, func_type = action_type.split(":", 1)
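        # Prefer a built-in handler named _load_<scope>_<func>; otherwise fall back to an
        # action plugin declared in the preload configuration.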
default_funcname = "_load_%s_%s" % (scope, func_type)
action_func = getattr(self.process, default_funcname, None)
if not action_func:
action_funcname = self.preload_cfg["action_plugins"].get(action_type, {})
if not action_funcname:
log.warn("Unknown action: %s", action_type)
return
action_func = getattr(self.process, action_funcname, None)
if not action_func:
log.warn("Action function %s not found for action %s", action_funcname, action_type)
return
action_func(action)
# -------------------------------------------------------------------------
def _load_system_ids(self):
"""Read some system objects for later reference"""
org_objs, _ = self.rr.find_resources(name="ION", restype=RT.Org, id_only=False)
if not org_objs:
raise BadRequest("ION org not found. Was system force_cleaned since bootstrap?")
ion_org_id = org_objs[0]._id
self._register_id(ID_ORG_ION, ion_org_id, org_objs[0])
system_actor = get_system_actor()
system_actor_id = system_actor._id if system_actor else 'anonymous'
self._register_id(ID_SYSTEM_ACTOR, system_actor_id, system_actor if system_actor else None)
def _prepare_incremental(self):
"""
Look in the resource registry for any resources that have a preload ID on them so that
they can be referenced under this preload ID during this load run.
"""
log.debug("Loading prior preloaded resources for reference")
res_objs, res_keys = self.rr.find_resources_ext(alt_id_ns="PRE", id_only=False)
res_preload_ids = [key['alt_id'] for key in res_keys]
res_ids = [obj._id for obj in res_objs]
log.debug("Found %s previously preloaded resources", len(res_objs))
res_assocs = self.rr.find_associations(predicate="*", id_only=False)
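        # Index existing associations by predicate so later duplicate checks are cheap.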
        for assoc in res_assocs:
            self.resource_assocs.setdefault(assoc["p"], []).append(assoc)
log.debug("Found %s existing associations", len(res_assocs))
existing_resources = dict(zip(res_preload_ids, res_objs))
if len(existing_resources) != len(res_objs):
raise BadRequest("Stored preload IDs are NOT UNIQUE!!! Cannot link to old resources")
res_id_mapping = dict(zip(res_preload_ids, res_ids))
self.resource_ids.update(res_id_mapping)
res_obj_mapping = dict(zip(res_preload_ids, res_objs))
self.resource_objs.update(res_obj_mapping)
def create_object_from_cfg(self, cfg, objtype, key="resource", prefix="", existing_obj=None):
"""
Construct an IonObject of a determined type from given config dict with attributes.
Convert all attributes according to their schema target type. Supports nested objects.
Supports edit of objects of same type.
"""
log.trace("Create object type=%s, prefix=%s", objtype, prefix)
if objtype == "dict":
schema = None
else:
schema = self._get_object_class(objtype)._schema
obj_fields = {} # Attributes for IonObject creation as dict
nested_done = set() # Names of attributes with nested objects already created
obj_cfg = get_safe(cfg, key)
for subkey, value in obj_cfg.iteritems():
if subkey.startswith(prefix):
attr = subkey[len(prefix):]
if '.' in attr: # We are a parent entry
# TODO: Make sure to not create nested object multiple times
slidx = attr.find('.')
nested_obj_field = attr[:slidx]
parent_field = attr[:slidx+1]
nested_prefix = prefix + parent_field # prefix plus nested object name
if '[' in nested_obj_field and nested_obj_field[-1] == ']':
sqidx = nested_obj_field.find('[')
nested_obj_type = nested_obj_field[sqidx+1:-1]
nested_obj_field = nested_obj_field[:sqidx]
elif objtype == "dict":
nested_obj_type = "dict"
else:
nested_obj_type = schema[nested_obj_field]['type']
# Make sure to not create the same nested object twice
if parent_field in nested_done:
continue
# Support direct indexing in a list
list_idx = -1
if nested_obj_type.startswith("list/"):
_, list_idx, nested_obj_type = nested_obj_type.split("/")
list_idx = int(list_idx)
log.trace("Get nested object field=%s type=%s, prefix=%s", nested_obj_field, nested_obj_type, prefix)
nested_obj = self.create_object_from_cfg(cfg, nested_obj_type, key, nested_prefix)
if list_idx >= 0:
my_list = obj_fields.setdefault(nested_obj_field, [])
if list_idx >= len(my_list):
my_list[len(my_list):list_idx] = [None]*(list_idx-len(my_list)+1)
my_list[list_idx] = nested_obj
else:
obj_fields[nested_obj_field] = nested_obj
nested_done.add(parent_field)
elif objtype == "dict":
# TODO: What about type?
obj_fields[attr] = value
elif attr in schema: # We are the leaf attribute
try:
if value:
fieldvalue = get_typed_value(value, schema[attr])
obj_fields[attr] = fieldvalue
except Exception:
log.warn("Object type=%s, prefix=%s, field=%s cannot be converted to type=%s. Value=%s",
objtype, prefix, attr, schema[attr]['type'], value, exc_info=True)
#fieldvalue = str(fieldvalue)
else:
# warn about unknown fields just once -- not on each row
log.warn("Skipping unknown field in %s: %s%s", objtype, prefix, attr)
if objtype == "dict":
obj = obj_fields
else:
if existing_obj:
# Edit attributes
if existing_obj.type_ != objtype:
raise Inconsistent("Cannot edit resource. Type mismatch old=%s, new=%s" % (existing_obj.type_, objtype))
# TODO: Don't edit empty nested attributes
for attr in list(obj_fields.keys()):
if not obj_fields[attr]:
del obj_fields[attr]
for attr in ('alt_ids','_id','_rev','type_'):
if attr in obj_fields:
del obj_fields[attr]
existing_obj.__dict__.update(obj_fields)
log.trace("Update object type %s using field names %s", objtype, obj_fields.keys())
obj = existing_obj
else:
if cfg.get(KEY_ID, None) and 'alt_ids' in schema:
if 'alt_ids' in obj_fields:
obj_fields['alt_ids'].append("PRE:"+cfg[KEY_ID])
else:
obj_fields['alt_ids'] = ["PRE:"+cfg[KEY_ID]]
log.trace("Create object type %s from field names %s", objtype, obj_fields.keys())
obj = IonObject(objtype, **obj_fields)
return obj
def _get_object_class(self, objtype):
if objtype in self.obj_classes:
return self.obj_classes[objtype]
try:
obj_class = named_any("interface.objects.%s" % objtype)
self.obj_classes[objtype] = obj_class
return obj_class
except Exception:
log.error('failed to find class for type %s' % objtype)
def _get_service_client(self, service):
return get_service_registry().services[service].client(process=self.process)
def _register_id(self, alias, resid, res_obj=None, is_update=False):
"""Keep preload resource in internal dict for later reference"""
if not is_update and alias in self.resource_ids:
raise BadRequest("ID alias %s used twice" % alias)
self.resource_ids[alias] = resid
self.resource_objs[alias] = res_obj
log.trace("Added resource alias=%s to id=%s", alias, resid)
def _read_resource_id(self, res_id):
existing_obj = self.rr.read(res_id)
self.resource_objs[res_id] = existing_obj
self.resource_ids[res_id] = res_id
return existing_obj
def _get_resource_id(self, alias_id):
"""Returns resource ID from preload alias ID, scanning also for real resource IDs to be loaded"""
if alias_id in self.resource_ids:
return self.resource_ids[alias_id]
elif re.match(UUID_RE, alias_id):
# This is obviously an ID of a real resource - let it fail if not existing
self._read_resource_id(alias_id)
log.debug("Referencing existing resource via direct ID: %s", alias_id)
return alias_id
else:
raise KeyError(alias_id)
def _get_resource_obj(self, res_id, silent=False):
"""Returns a resource object from one of the memory locations for given preload or internal ID"""
if self.bulk and res_id in self.bulk_resources:
return self.bulk_resources[res_id]
elif res_id in self.resource_objs:
return self.resource_objs[res_id]
else:
# Real ID not alias - reverse lookup
alias_ids = [alias_id for alias_id,int_id in self.resource_ids.iteritems() if int_id==res_id]
if alias_ids:
return self.resource_objs[alias_ids[0]]
if not silent:
log.debug("_get_resource_obj(): No object found for '%s'", res_id)
return None
def _resource_exists(self, res_id):
if not res_id:
return None
res = self._get_resource_obj(res_id, silent=True)
return res is not None
def _has_association(self, sub, pred, obj):
"""Returns True if the described associated already exists."""
for assoc in self.resource_assocs.get(pred, []):
if assoc.s == sub and assoc.o == obj:
return True
return False
def _update_resource_obj(self, res_id):
"""Updates an existing resource object"""
res_obj = self._get_resource_obj(res_id)
self.rr.update(res_obj)
log.debug("Updating resource %s (pre=%s id=%s): '%s'", res_obj.type_, res_id, res_obj._id, res_obj.name)
def _get_alt_id(self, res_obj, prefix):
alt_ids = getattr(res_obj, 'alt_ids', [])
for alt_id in alt_ids:
if alt_id.startswith(prefix+":"):
alt_id_str = alt_id[len(prefix)+1:]
return alt_id_str
def _get_op_headers(self, owner_id, force_user=False):
headers = {}
if owner_id:
owner_id = self.resource_ids[owner_id]
headers[MSG_HEADER_ACTOR] = owner_id
headers[MSG_HEADER_ROLES] = {'ION': ['SUPERUSER', 'MODERATOR']}
headers[MSG_HEADER_VALID] = '0'
elif force_user:
return self._get_system_actor_headers()
return headers
def _get_system_actor_headers(self):
return {MSG_HEADER_ACTOR: self.resource_ids[ID_SYSTEM_ACTOR],
MSG_HEADER_ROLES: {'ION': ['SUPERUSER', 'MODERATOR']},
MSG_HEADER_VALID: '0'}
def basic_resource_create(self, cfg, restype, svcname, svcop, key="resource",
set_attributes=None, support_bulk=False, **kwargs):
"""
Orchestration method doing the following:
- create an object from a row,
- add any defined constraints,
- make a service call to create resource for given object,
- share resource in a given Org
- store newly created resource id and obj for future reference
- (optional) support bulk create/update
"""
res_id_alias = cfg[KEY_ID]
existing_obj = None
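        # The preload ID may be a known alias or a real resource UUID; either way the
        # existing object is updated instead of created.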
if res_id_alias in self.resource_ids:
# TODO: Catch case when ID used twice
existing_obj = self.resource_objs[res_id_alias]
elif re.match(UUID_RE, res_id_alias):
# This is obviously an ID of a real resource
try:
existing_obj = self._read_resource_id(res_id_alias)
log.debug("Updating existing resource via direct ID: %s", res_id_alias)
except NotFound as nf:
pass # Ok it was not there after all
try:
res_obj = self.create_object_from_cfg(cfg, restype, key, "", existing_obj=existing_obj)
except Exception as ex:
log.exception("Error creating object")
raise
if set_attributes:
for attr, attr_val in set_attributes.iteritems():
setattr(res_obj, attr, attr_val)
if existing_obj:
res_id = self.resource_ids[res_id_alias]
if self.bulk and support_bulk:
self.bulk_resources[res_id] = res_obj
self.bulk_existing.add(res_id) # Make sure to remember which objects are existing
else:
# TODO: Use the appropriate service call here
self.rr.update(res_obj)
else:
if self.bulk and support_bulk:
res_id = self._create_bulk_resource(res_obj, res_id_alias)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None))
self._resource_assign_owner(headers, res_obj)
self._resource_advance_lcs(cfg, res_id)
else:
svc_client = self._get_service_client(svcname)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None), force_user=True)
res_id = getattr(svc_client, svcop)(res_obj, headers=headers, **kwargs)
if res_id:
if svcname == "resource_registry" and svcop == "create":
res_id = res_id[0]
res_obj._id = res_id
self._register_id(res_id_alias, res_id, res_obj)
self._resource_assign_org(cfg, res_id)
return res_id
def _create_bulk_resource(self, res_obj, res_alias=None):
if not hasattr(res_obj, "_id"):
res_obj._id = create_unique_resource_id()
ts = get_ion_ts()
if hasattr(res_obj, "ts_created") and not res_obj.ts_created:
res_obj.ts_created = ts
if hasattr(res_obj, "ts_updated") and not res_obj.ts_updated:
res_obj.ts_updated = ts
res_id = res_obj._id
self.bulk_resources[res_id] = res_obj
if res_alias:
self._register_id(res_alias, res_id, res_obj)
return res_id
def _resource_advance_lcs(self, cfg, res_id):
"""
Change lifecycle state of object to requested state. Supports bulk.
"""
res_obj = self._get_resource_obj(res_id)
restype = res_obj.type_
lcsm = get_restype_lcsm(restype)
initial_lcmat = lcsm.initial_state if lcsm else LCS.DEPLOYED
initial_lcav = lcsm.initial_availability if lcsm else AS.AVAILABLE
lcstate = cfg.get(KEY_LCSTATE, None)
if lcstate:
row_lcmat, row_lcav = lcstate.split("_", 1)
if self.bulk and res_id in self.bulk_resources:
self.bulk_resources[res_id].lcstate = row_lcmat
self.bulk_resources[res_id].availability = row_lcav
else:
if row_lcmat != initial_lcmat: # Vertical transition
self.rr.set_lifecycle_state(res_id, row_lcmat)
if row_lcav != initial_lcav: # Horizontal transition
self.rr.set_lifecycle_state(res_id, row_lcav)
elif self.bulk and res_id in self.bulk_resources:
# Set the lcs to resource type appropriate initial values
self.bulk_resources[res_id].lcstate = initial_lcmat
self.bulk_resources[res_id].availability = initial_lcav
def _resource_assign_org(self, cfg, res_id):
"""
Shares the resource in the given orgs. Supports bulk.
"""
org_ids = cfg.get(KEY_ORGS, None)
if org_ids:
org_ids = get_typed_value(org_ids, targettype="simplelist")
for org_id in org_ids:
org_res_id = self.resource_ids[org_id]
if self.bulk and res_id in self.bulk_resources:
# Note: org_id is alias, res_id is internal ID
org_obj = self._get_resource_obj(org_id)
res_obj = self._get_resource_obj(res_id)
# Create association to given Org
assoc_obj = self._create_association(org_obj, PRED.hasResource, res_obj, support_bulk=True)
else:
svc_client = self._get_service_client("org_management")
svc_client.share_resource(org_res_id, res_id, headers=self._get_system_actor_headers())
def _resource_assign_owner(self, headers, res_obj):
if self.bulk and 'ion-actor-id' in headers:
owner_id = headers['ion-actor-id']
user_obj = self._get_resource_obj(owner_id)
if owner_id and owner_id != 'anonymous':
self._create_association(res_obj, PRED.hasOwner, user_obj, support_bulk=True)
def basic_associations_create(self, cfg, res_alias, support_bulk=False):
for assoc in cfg.get("associations", []):
direction, other_id, predicate = assoc.split(",")
res_id = self.resource_ids[res_alias]
other_res_id = self.resource_ids[other_id]
if direction == "TO":
self._create_association(res_id, predicate, other_res_id, support_bulk=support_bulk)
elif direction == "FROM":
self._create_association(other_res_id, predicate, res_id, support_bulk=support_bulk)
def _create_association(self, subject=None, predicate=None, obj=None, support_bulk=False):
"""
Create an association between two IonObjects with a given predicate.
Supports bulk mode
"""
if self.bulk and support_bulk:
if not subject or not predicate or not obj:
raise BadRequest("Association must have all elements set: %s/%s/%s" % (subject, predicate, obj))
if isinstance(subject, basestring):
subject = self._get_resource_obj(subject)
if "_id" not in subject:
raise BadRequest("Subject id not available")
subject_id = subject._id
st = subject.type_
if isinstance(obj, basestring):
obj = self._get_resource_obj(obj)
if "_id" not in obj:
raise BadRequest("Object id not available")
object_id = obj._id
ot = obj.type_
assoc_id = create_unique_association_id()
assoc_obj = IonObject("Association",
s=subject_id, st=st,
p=predicate,
o=object_id, ot=ot,
ts=get_ion_ts())
assoc_obj._id = assoc_id
self.bulk_associations[assoc_id] = assoc_obj
return assoc_id, '1-norev'
else:
return self.rr.create_association(subject, predicate, obj)
def commit_bulk(self):
if not self.bulk_resources and not self.bulk_associations:
return
# Perform the create for resources
res_new = [obj for obj in self.bulk_resources.values() if obj["_id"] not in self.bulk_existing]
res = self.rr.rr_store.create_mult(res_new, allow_ids=True)
# Perform the update for resources
res_upd = [obj for obj in self.bulk_resources.values() if obj["_id"] in self.bulk_existing]
res = self.rr.rr_store.update_mult(res_upd)
# Perform the create for associations
assoc_new = [obj for obj in self.bulk_associations.values()]
res = self.rr.rr_store.create_mult(assoc_new, allow_ids=True)
log.info("Bulk stored {} resource objects ({} updates) and {} associations".format(len(res_new), len(res_upd), len(assoc_new)))
self.bulk_resources.clear()
self.bulk_associations.clear()
self.bulk_existing.clear()
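# Illustrative sketch (not part of this module): the shape of the YAML files that
# preload_master() and _execute_step() expect. The file names and the action type below
# are hypothetical; only the preload_type/steps/actions/action keys come from the code above.
#
#   # master.yml
#   preload_type: steps
#   steps:
#     - base_resources            # loaded from <master dir>/base_resources.yml
#
#   # base_resources.yml
#   preload_type: actions
#   actions:
#     - action: resource:org      # dispatched to _load_resource_org() or a configured plugin
#       id: ORG_EXAMPLE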
|
bsd-2-clause
| -1,043,299,870,787,946,800
| 44.288355
| 135
| 0.572303
| false
| 3.927701
| false
| false
| false
|
stormi/tsunami
|
outils/gestionnaire_module/commande.py
|
1
|
3556
|
# -*-coding:Utf-8 -*
# Copyright (c) 2011 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .divers import *
CMD_MESSAGE = """
\"""Package contenant les commandes du module {module}.\"""
"""
CONTENU_CMD = """
\"""Package contenant la commande '{lcommande}'.
\"""
from primaires.interpreteur.commande.commande import Commande
class Cmd{commande}(Commande):
\"""Commande '{lcommande}'.
\"""
def __init__(self):
\"""Constructeur de la commande\"""
Commande.__init__(self, "{lcommande}", "{commande_en}")
self.nom_categorie = "{categorie}"
self.schema = "{schema}"
self.aide_courte = "TODO"
self.aide_longue = \\
"TODO"
def interpreter(self, personnage, dic_masques):
\"""Interprétation de la commande\"""
pass
"""
def ajouter(rep, module, typeMod, entete, commande):
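    # commande holds the CLI arguments: commande[1] is the French command name,
    # commande[2] the English name, commande[3] the category and commande[4] the schema
    # (the last three are optional).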
if len(commande) < 2:
print("Pas assez d'argument")
return
commande_fr = commande[1].lower()
commande_en = commande_fr
schema = ""
categorie = ""
if len(commande) > 2:
commande_en = commande[2]
if len(commande) > 3:
categorie = commande[3]
if len(commande) > 4:
schema = commande[4]
contenu = CONTENU_CMD.format(
lcommande=commande_fr,
commande=commande_fr.capitalize(),
commande_en=commande_en,
        categorie=categorie,
        schema=schema)
repcmd = rep + "commandes/" + commande_fr + "/"
os.makedirs(repcmd)
path = repcmd + "__init__.py"
if os.path.exists(path):
print("Une commande portant ce nom existait, annulation")
return
write(path, entete + contenu)
path = rep + "commandes/" + "__init__.py"
if not os.path.exists(path):
write(path, entete + CMD_MESSAGE.format(module=module))
append(path, "from . import {commande}\n".format(commande=commande_fr))
print("ATTENTION : vous devze modifié le __init__.py du module " \
"pour y rajouter cette commande")
|
bsd-3-clause
| 6,856,015,116,057,477,000
| 31.605505
| 79
| 0.664603
| false
| 3.611789
| false
| false
| false
|
catapult-project/catapult
|
common/py_vulcanize/third_party/rjsmin/bench/write.py
|
3
|
10657
|
#!/usr/bin/env python
# -*- coding: ascii -*-
r"""
=========================
Write benchmark results
=========================
Write benchmark results.
:Copyright:
Copyright 2014 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.write [-p plain] [-t table] <pickled
-p plain Plain file to write to (like docs/BENCHMARKS).
-t table Table file to write to (like docs/_userdoc/benchmark.txt).
"""
from __future__ import print_function
if __doc__:
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
import os as _os
import re as _re
import sys as _sys
try:
unicode
except NameError:
def uni(v):
if hasattr(v, 'decode'):
return v.decode('latin-1')
return str(v)
else:
def uni(v):
if isinstance(v, unicode):
return v.encode('utf-8')
return str(v)
def write_table(filename, results):
"""
Output tabled benchmark results
:Parameters:
`filename` : ``str``
Filename to write to
`results` : ``list``
Results
"""
try:
next
except NameError:
next = lambda i: (getattr(i, 'next', None) or i.__next__)()
try:
cmp
except NameError:
cmp = lambda a, b: (a > b) - (a < b)
names = [
('simple_port', 'Simple Port'),
('jsmin_2_0_9', 'jsmin 2.0.9'),
('rjsmin', '|rjsmin|'),
('_rjsmin', r'_\ |rjsmin|'),
]
benched_per_table = 2
results = sorted(results, reverse=True)
# First we transform our data into a table (list of lists)
pythons, widths = [], [0] * (benched_per_table + 1)
last_version = None
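    # Results are sorted newest-first; keep the newest Python release plus the newest 2.x one.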
for version, _, result in results:
version = uni(version)
if not(last_version is None or version.startswith('2.')):
continue
last_version = version
namesub = _re.compile(r'(?:-\d+(?:\.\d+)*)?\.js$').sub
result = iter(result)
tables = []
# given our data it's easier to create the table transposed...
for benched in result:
rows = [['Name'] + [desc for _, desc in names]]
for _ in range(benched_per_table):
if _:
try:
benched = next(result)
except StopIteration:
rows.append([''] + ['' for _ in names])
continue
times = dict((
uni(port), (time, benched['sizes'][idx])
) for idx, (port, time) in enumerate(benched['times']))
columns = ['%s (%.1f)' % (
namesub('', _os.path.basename(uni(benched['filename']))),
benched['size'] / 1024.0,
)]
for idx, (port, _) in enumerate(names):
if port not in times:
columns.append('n/a')
continue
time, size = times[port]
if time is None:
columns.append('(failed)')
continue
columns.append('%s%.2f ms (%.1f %s)' % (
idx == 0 and ' ' or '',
time,
size / 1024.0,
idx == 0 and '\\*' or ['=', '>', '<'][
cmp(size, benched['sizes'][0])
],
))
rows.append(columns)
# calculate column widths (global for all tables)
for idx, row in enumerate(rows):
widths[idx] = max(widths[idx], max(map(len, row)))
# ... and transpose it back.
tables.append(zip(*rows))
pythons.append((version, tables))
if last_version.startswith('2.'):
break
# Second we create a rest table from it
lines = []
separator = lambda c='-': '+'.join([''] + [
c * (width + 2) for width in widths
] + [''])
for idx, (version, tables) in enumerate(pythons):
if idx:
lines.append('')
lines.append('')
line = 'Python %s' % (version,)
lines.append(line)
lines.append('~' * len(line))
for table in tables:
lines.append('')
lines.append('.. rst-class:: benchmark')
lines.append('')
for idx, row in enumerate(table):
if idx == 0:
# header
lines.append(separator())
lines.append('|'.join([''] + [
' %s%*s ' % (col, len(col) - width, '')
for width, col in zip(widths, row)
] + ['']))
lines.append(separator('='))
else: # data
lines.append('|'.join([''] + [
j == 0 and (
' %s%*s ' % (col, len(col) - widths[j], '')
) or (
['%*s ', ' %*s '][idx == 1] % (widths[j], col)
)
for j, col in enumerate(row)
] + ['']))
lines.append(separator())
fplines = []
fp = open(filename)
try:
fpiter = iter(fp)
for line in fpiter:
line = line.rstrip()
if line == '.. begin tables':
buf = []
for line in fpiter:
line = line.rstrip()
if line == '.. end tables':
fplines.append('.. begin tables')
fplines.append('')
fplines.extend(lines)
fplines.append('')
fplines.append('.. end tables')
buf = []
break
else:
buf.append(line)
else:
fplines.extend(buf)
_sys.stderr.write("Placeholder container not found!\n")
else:
fplines.append(line)
finally:
fp.close()
fp = open(filename, 'w')
try:
fp.write('\n'.join(fplines) + '\n')
finally:
fp.close()
def write_plain(filename, results):
"""
Output plain benchmark results
:Parameters:
`filename` : ``str``
Filename to write to
`results` : ``list``
Results
"""
lines = []
results = sorted(results, reverse=True)
for idx, (version, import_notes, result) in enumerate(results):
if idx:
lines.append('')
lines.append('')
lines.append('$ python%s -OO bench/main.py bench/*.js' % (
'.'.join(version.split('.')[:2])
))
lines.append('~' * 72)
for note in import_notes:
lines.append(uni(note))
lines.append('Python Release: %s' % (version,))
for single in result:
lines.append('')
lines.append('Benchmarking %r... (%.1f KiB)' % (
uni(single['filename']), single['size'] / 1024.0
))
for msg in single['messages']:
lines.append(msg)
times = []
space = max([len(uni(port)) for port, _ in single['times']])
for idx, (port, time) in enumerate(single['times']):
port = uni(port)
if time is None:
lines.append(" FAILED %s" % (port,))
else:
times.append(time)
lines.append(
" Timing %s%s ... (%5.1f KiB %s) %8.2f ms" % (
port,
" " * (space - len(port)),
single['sizes'][idx] / 1024.0,
idx == 0 and '*' or ['=', '>', '<'][
cmp(single['sizes'][idx], single['sizes'][0])
],
time
)
)
if len(times) > 1:
lines[-1] += " (factor: %s)" % (', '.join([
'%.2f' % (timed / time) for timed in times[:-1]
]))
lines.append('')
lines.append('')
lines.append('# vim: nowrap')
fp = open(filename, 'w')
try:
fp.write('\n'.join(lines) + '\n')
finally:
fp.close()
def main(argv=None):
""" Main """
import getopt as _getopt
import pickle as _pickle
if argv is None:
argv = _sys.argv[1:]
try:
opts, args = _getopt.getopt(argv, "hp:t:", ["help"])
    except _getopt.GetoptError:
e = _sys.exc_info()[0](_sys.exc_info()[1])
print(
"%s\nTry %s -mbench.write --help" % (
e,
_os.path.basename(_sys.executable),
), file=_sys.stderr)
_sys.exit(2)
plain, table = None, None
for key, value in opts:
if key in ("-h", "--help"):
print(
"%s -mbench.write [-p plain] [-t table] <pickled" % (
_os.path.basename(_sys.executable),
), file=_sys.stderr)
_sys.exit(0)
elif key == '-p':
plain = str(value)
elif key == '-t':
table = str(value)
struct = []
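    # Benchmark results arrive pickled on stdin; on Python 3 switch to the underlying
    # binary buffer so pickle can read raw bytes.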
_sys.stdin = getattr(_sys.stdin, 'detach', lambda: _sys.stdin)()
try:
while True:
version, import_notes, result = _pickle.load(_sys.stdin)
if hasattr(version, 'decode'):
version = version.decode('latin-1')
struct.append((version, import_notes, result))
except EOFError:
pass
if plain:
write_plain(plain, struct)
if table:
write_table(table, struct)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -5,912,417,761,413,347,000
| 29.800578
| 77
| 0.449282
| false
| 4.22394
| false
| false
| false
|
bndl/bndl
|
bndl/net/watchdog.py
|
1
|
7044
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio.futures import CancelledError
from datetime import datetime
from random import random
import asyncio
import atexit
import logging
from bndl.net.messages import Ping
logger = logging.getLogger(__name__)
# The time in seconds between checking connections
WATCHDOG_INTERVAL = 2
# allow at most 10 connection attempts
# after that, drop the peer connection from the
# peer table
MAX_CONNECTION_ATTEMPT = 10
# The maximum time in seconds with no communication
# after which a ping is sent
DT_PING_AFTER = 60
# The maximum time in seconds with no communication
# after which the connection is considered lost
DT_MAX_INACTIVE = DT_PING_AFTER * 2
class PeerStats(object):
def __init__(self, peer):
self.peer = peer
self.connection_attempts = 0
self.last_update = datetime.now()
self.last_reconnect = None
self.error_since = None
self.bytes_sent = 0
self.bytes_sent_rate = 0
self.bytes_received = 0
self.bytes_received_rate = 0
def update(self):
now = datetime.now()
interval = (now - self.last_update).total_seconds()
self.last_update = now
if not self.peer.is_connected and self.peer.connected_on is not None:
if not self.error_since:
logger.info('%r disconnected', self.peer)
self.error_since = self.error_since or now
self.bytes_sent_rate = 0
self.bytes_received_rate = 0
return
# calculate tx and rx rates
if self.peer.is_connected:
self.bytes_sent_rate = (self.peer.conn.bytes_sent - self.bytes_sent) / interval
self.bytes_sent = self.peer.conn.bytes_sent
self.bytes_received_rate = (self.peer.conn.bytes_received - self.bytes_received) / interval
self.bytes_received = self.peer.conn.bytes_received
if self.peer.last_rx and (now - self.peer.last_rx).total_seconds() > DT_MAX_INACTIVE:
if not self.error_since:
logger.info('%r is inactive for more than %s seconds (%s)', self.peer,
DT_MAX_INACTIVE, now - self.peer.last_rx)
self.error_since = self.error_since or now
else:
if self.error_since:
logger.info('%s recovered', self.peer)
# clear error stats
self.connection_attempts = 0
self.error_since = None
def __str__(self):
if self.error_since:
fmt = '{peer.name} error since {error_since}'
else:
            fmt = '{peer.name} communicating at {bytes_received_rate:.2f} B/s rx, {bytes_sent_rate:.2f} B/s tx'
return fmt.format_map(self.__dict__)
class Watchdog(object):
def __init__(self, node):
self.node = node
self._peer_stats = {}
self.monitor_task = None
atexit.register(self.stop)
def start(self):
self.monitor_task = self.node.loop.create_task(self._monitor())
def stop(self):
self.monitor_task = None
def peer_stats(self, peer):
stats = self._peer_stats.get(peer)
if not stats:
self._peer_stats[peer] = stats = PeerStats(peer)
return stats
@asyncio.coroutine
def _monitor(self):
try:
while self.monitor_task and self.node.running:
yield from self._check()
yield from asyncio.sleep(WATCHDOG_INTERVAL, loop=self.node.loop) # @UndefinedVariable
except CancelledError:
pass
@asyncio.coroutine
def _ping(self, peer):
try:
yield from peer.send(Ping())
except Exception:
self.peer_stats(peer).update()
logger.warning('Unable to send ping to peer %r', peer, exc_info=True)
@asyncio.coroutine
def _check(self):
for name in list(self.node.peers.keys()):
try:
# check a connection with a peer
yield from self._check_peer(name)
except CancelledError:
raise
except Exception:
                logger.exception('unable to check peer %s of %s', name, self.node.name)
# if no nodes are connected, attempt to connect with the seeds
if not any(peer.is_connected for peer in self.node.peers.values()):
yield from self.node._connect_seeds()
@asyncio.coroutine
def _check_peer(self, name):
try:
peer = self.node.peers[name]
except KeyError:
return
if peer.name != name:
logger.info('Peer %s of node %s registered under %s, updating registration',
peer.name, self.node.name, name)
peer = self.node.peers.pop(name)
self.node.peers[name] = peer
stats = self.peer_stats(peer)
stats.update()
if stats.connection_attempts > MAX_CONNECTION_ATTEMPT:
popped = self.node.peers.pop(name)
if popped != peer:
self.node.peers[name] = popped
yield from peer.disconnect('disconnected by watchdog after %s failed connection attempts',
stats.connection_attempts)
elif stats.error_since:
# max reconnect interval is:
# - twice the watch_dog interval (maybe something was missed)
# - exponentially to the connection attempts (exponentially back off)
# - with a random factor between 1 +/- .25
now = datetime.now()
connect_wait = WATCHDOG_INTERVAL * 2 ** stats.connection_attempts * (.75 + random() / 2)
if (now - stats.error_since).total_seconds() > WATCHDOG_INTERVAL * 2 and \
(not stats.last_reconnect or (now - stats.last_reconnect).total_seconds() > connect_wait):
stats.connection_attempts += 1
stats.last_reconnect = now
yield from peer.connect()
elif peer.is_connected and \
peer.last_rx and \
(datetime.now() - peer.last_rx).total_seconds() > DT_PING_AFTER:
yield from self._ping(peer)
def rxtx_stats(self):
stats = dict(
bytes_sent=0,
bytes_sent_rate=0,
bytes_received=0,
bytes_received_rate=0
)
for peer_stats in self._peer_stats.values():
for k in stats.keys():
stats[k] += getattr(peer_stats, k, 0)
return stats
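# --- Illustrative usage sketch (not part of the original module) ---
# A Watchdog is normally created and driven by a bndl.net Node; the lines
# below only show the expected call pattern.  `node` is a stand-in for a
# running Node with an asyncio event loop, so this stays a comment rather
# than executable code.
#
#   watchdog = Watchdog(node)
#   watchdog.start()              # schedules _monitor() on node.loop
#   ...                           # node runs, peers come and go
#   print(watchdog.rxtx_stats())  # aggregated tx/rx byte counts and rates
#   watchdog.stop()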
|
apache-2.0
| -6,064,973,426,037,575,000
| 32.542857
| 109
| 0.59753
| false
| 4.055268
| false
| false
| false
|
shibanis1/spark-tk
|
python/sparktk/frame/ops/drop_rows.py
|
1
|
1541
|
from sparktk.frame.row import Row
def drop_rows(self, predicate):
"""
Erase any row in the current frame which qualifies.
Parameters
----------
:param predicate: (UDF) Function which evaluates a row to a boolean; rows that answer True are dropped from
the frame.
Examples
--------
>>> frame = tc.frame.create([['Fred',39,16,'555-1234'],
... ['Susan',33,3,'555-0202'],
... ['Thurston',65,26,'555-4510'],
... ['Judy',44,14,'555-2183']],
... schema=[('name', str), ('age', int), ('tenure', int), ('phone', str)])
>>> frame.inspect()
[#] name age tenure phone
====================================
[0] Fred 39 16 555-1234
[1] Susan 33 3 555-0202
[2] Thurston 65 26 555-4510
[3] Judy 44 14 555-2183
>>> frame.drop_rows(lambda row: row.name[-1] == 'n') # drop people whose name ends in 'n'
>>> frame.inspect()
[#] name age tenure phone
================================
[0] Fred 39 16 555-1234
[1] Judy 44 14 555-2183
More information on a |UDF| can be found at :doc:`/ds_apir`.
"""
row = Row(self.schema)
def drop_rows_func(r):
row._set_data(r)
return not predicate(row)
self._python.rdd = self._python.rdd.filter(drop_rows_func)
|
apache-2.0
| 887,973,931,321,488,600
| 33.244444
| 111
| 0.447761
| false
| 3.617371
| false
| false
| false
|
qix/tooler
|
tooler/command.py
|
1
|
1233
|
import io
from typing import Dict, Optional
from .exceptions import CommandHelpException
from .parser import DefaultParser
class Command:
def __init__(self):
pass
def run(self, selector, argv):
raise Exception("not implemented")
class DecoratorCommand(Command):
def __init__(self, fn, doc=None, parser=None, shorthands: Optional[Dict[str, str]] = None):
# @todo: Should just take an actual `parser` object, but need to do a large
# refactor to fix that.
if parser:
assert not shorthands, "Shorthands option is not compatible with custom parser"
self.parser = parser()
else:
self.parser = DefaultParser(shorthands=shorthands)
self.fn = fn
self.doc = doc
def run(self, selector, argv):
try:
(args, vargs) = self.parser.parse(
self.fn,
self.doc,
selector,
argv
)
except CommandHelpException as e:
print(e.usage)
return
try:
return self.fn(*args, **vargs)
finally:
# Close any files that were opened as arguments
for value in [*args, *vargs.values()]:
# Skip as linter is not aware of `file` type
if isinstance(value, io.IOBase):
value.close()
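# Illustrative usage sketch (not part of the original module): a plain
# function is wrapped in a DecoratorCommand and dispatched with a selector
# and an argv list.  The exact argument syntax is defined by DefaultParser,
# so the call below is indicative only and kept as a comment.
#
#   def greet(name):
#       return "hello %s" % name
#
#   command = DecoratorCommand(greet, doc="Greet someone by name.")
#   command.run("greet", ["world"])   # -> "hello world", assuming positional parsing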
|
mit
| -8,756,057,895,321,486,000
| 24.6875
| 93
| 0.632603
| false
| 3.865204
| false
| false
| false
|
felixmatt/shyft
|
shyft/repository/service/yaml_geo_location_repository.py
|
1
|
1391
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import yaml
from .ssa_geo_ts_repository import GeoLocationRepository
from os import path
class YamlGeoLocationError(Exception):
pass
class YamlGeoLocationRepository(GeoLocationRepository):
"""
Provide a yaml-based key-location map for gis-identites not available(yet)
"""
def __init__(self, yaml_file_dir):
"""
Parameters
----------
        yaml_file_dir : string
            Path to the directory containing the location files, named
            pt_locations-epsg_<epsg_id>.yml, e.g.
            pt_locations-epsg_32632.yml (UTM32N) and
            pt_locations-epsg_32633.yml (UTM33N).
"""
self._file_dir = yaml_file_dir
def read_location_dict(self, epsg_id):
full_name = path.join(self._file_dir, _filename_of(epsg_id))
with open(full_name, 'r') as f:
return yaml.load(f)
def get_locations(self, location_id_list, epsg_id=32632):
loc_dict = self.read_location_dict(epsg_id)
locations = {}
for index in location_id_list:
if loc_dict.get(index) is not None:
locations[index] = tuple(loc_dict[index])
else:
raise YamlGeoLocationError("Could not get location of geo point-id!")
return locations
def _filename_of(epsg_id):
return "pt_locations-epsg_{}.yml".format(epsg_id)
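# Illustrative usage sketch (not part of the original module): a tiny location
# file is written to a temporary directory and read back.  The point id and
# coordinates are invented for demonstration, and the example assumes a PyYAML
# version where yaml.load still defaults its Loader, as the module above does.
if __name__ == '__main__':
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    with open(path.join(tmp_dir, _filename_of(32632)), 'w') as f:
        f.write("123: [432000.0, 6712000.0, 100.0]\n")
    repo = YamlGeoLocationRepository(tmp_dir)
    print(repo.get_locations([123], epsg_id=32632))  # {123: (432000.0, 6712000.0, 100.0)}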
|
lgpl-3.0
| -733,572,491,415,712,300
| 27.979167
| 85
| 0.608196
| false
| 3.566667
| false
| false
| false
|
botify-labs/simpleflow
|
swf/querysets/history.py
|
1
|
2268
|
from swf.models import History
from swf.querysets.base import BaseQuerySet
class HistoryQuerySet(BaseQuerySet):
"""WorkflowExecution history queryset"""
def __init__(self, domain, *args, **kwargs):
super(HistoryQuerySet, self).__init__(*args, **kwargs)
self.domain = domain
def get(self, run_id, workflow_id, max_results=None, page_size=100, reverse=False):
"""Retrieves a WorkflowExecution history
:param run_id: unique identifier of the workflow execution
:type run_id: string
:param workflow_id: The user defined identifier associated with the workflow execution
:type workflow_id: string
        :param max_results: Maximum output history size. The retrieved history will be
                            truncated if its size is greater than max_results.
        :type max_results: int
        :param page_size: SWF API response page size: controls how many history events
                          will be returned by each request. Keep in mind that until
max_results history size is reached, next pages will be
requested.
:type page_size: int
:param reverse: Should the history events be retrieved in reverse order.
:type reverse: bool
"""
max_results = max_results or page_size
if max_results < page_size:
page_size = max_results
response = self.connection.get_workflow_execution_history(
self.domain.name,
run_id,
workflow_id,
maximum_page_size=page_size,
reverse_order=reverse,
)
events = response["events"]
next_page = response.get("nextPageToken")
while next_page is not None and len(events) < max_results:
response = self.connection.get_workflow_execution_history(
self.domain.name,
run_id,
workflow_id,
maximum_page_size=page_size,
next_page_token=next_page,
reverse_order=reverse,
)
events.extend(response["events"])
next_page = response.get("nextPageToken")
return History.from_event_list(events)
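# Illustrative usage sketch (not part of the original module): fetching the
# history of a workflow execution.  `domain` stands for an swf.models.Domain
# bound to a boto connection, and the identifiers are placeholders, so this
# stays a comment rather than executable code.
#
#   qs = HistoryQuerySet(domain)
#   history = qs.get(run_id='22QFVi362...', workflow_id='basic-workflow',
#                    max_results=500, page_size=100)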
|
mit
| -425,559,705,143,165,500
| 36.180328
| 95
| 0.592152
| false
| 4.536
| false
| false
| false
|
ic-hep/DIRAC
|
Core/Utilities/ClassAd/ClassAdLight.py
|
1
|
8864
|
########################################################################
# $HeadURL$
########################################################################
""" ClassAd Class - a light purely Python representation of the
Condor ClassAd library.
"""
__RCSID__ = "$Id$"
class ClassAd:
def __init__( self, jdl ):
"""ClassAd constructor from a JDL string
"""
self.contents = {}
result = self.__analyse_jdl( jdl )
if result:
self.contents = result
def __analyse_jdl( self, jdl, index = 0 ):
"""Analyse one [] jdl enclosure
"""
jdl = jdl.strip()
# Strip all the blanks first
#temp = jdl.replace(' ','').replace('\n','')
temp = jdl
result = {}
if temp[0] != '[' or temp[-1] != ']':
print "Invalid JDL: it should start with [ and end with ]"
return result
# Parse the jdl string now
body = temp[1:-1]
index = 0
namemode = 1
valuemode = 0
while index < len( body ):
if namemode:
ind = body.find( "=", index )
if ind != -1:
name = body[index:ind]
index = ind + 1
valuemode = 1
namemode = 0
else:
break
elif valuemode:
ind1 = body.find( "[", index )
ind2 = body.find( ";", index )
if ind1 != -1 and ind1 < ind2:
value, newind = self.__find_subjdl( body, ind1 )
elif ind1 == -1 and ind2 == -1:
value = body[index:]
newind = len( body )
else:
if index == ind2:
return {}
else:
value = body[index:ind2]
newind = ind2 + 1
result[name.strip()] = value.strip().replace( '\n', '' )
index = newind
valuemode = 0
namemode = 1
return result
def __find_subjdl( self, body, index ):
""" Find a full [] enclosure starting from index
"""
result = ''
if body[index] != '[':
return ( result, 0 )
depth = 0
ind = index
while ( depth < 10 ):
ind1 = body.find( ']', ind + 1 )
ind2 = body.find( '[', ind + 1 )
if ind2 != -1 and ind2 < ind1:
depth += 1
ind = ind2
else:
if depth > 0:
depth -= 1
ind = ind1
else:
result = body[index:ind1 + 1]
if body[ind1 + 1] == ";":
return ( result, ind1 + 2 )
else:
return result, 0
return result, 0
def insertAttributeInt( self, name, attribute ):
"""Insert a named integer attribute
"""
self.contents[name] = str( attribute )
def insertAttributeBool( self, name, attribute ):
"""Insert a named boolean attribute
"""
if attribute:
self.contents[name] = 'true'
else:
self.contents[name] = 'false'
def insertAttributeString( self, name, attribute ):
"""Insert a named string attribute
"""
self.contents[name] = '"' + str( attribute ) + '"'
def insertAttributeVectorString( self, name, attributelist ):
"""Insert a named string list attribute
"""
tmp = map ( lambda x : '"' + x + '"', attributelist )
tmpstr = ','.join( tmp )
self.contents[name] = '{' + tmpstr + '}'
def insertAttributeVectorInt( self, name, attributelist ):
"""Insert a named string list attribute
"""
tmp = map ( lambda x : str( x ), attributelist )
tmpstr = ','.join( tmp )
self.contents[name] = '{' + tmpstr + '}'
def insertAttributeVectorStringList( self, name, attributelist ):
"""Insert a named list of string lists
"""
listOfLists = []
for stringList in attributelist:
#tmp = map ( lambda x : '"' + x + '"', stringList )
tmpstr = ','.join( stringList )
listOfLists.append('{' + tmpstr + '}')
self.contents[name] = '{' + ','.join(listOfLists) + '}'
def lookupAttribute( self, name ):
"""Check the presence of the given attribute
"""
return self.contents.has_key( name )
def set_expression( self, name, attribute ):
"""Insert a named expression attribute
"""
self.contents[name] = str( attribute )
def get_expression( self, name ):
"""Get expression corresponding to a named attribute
"""
if self.contents.has_key( name ):
if isinstance( self.contents[name], ( int, long ) ):
return str( self.contents[name] )
else :
return self.contents[name]
else:
return ""
def isAttributeList( self, name ):
""" Check if the given attribute is of the List type
"""
attribute = self.get_expression( name ).strip()
return attribute.startswith( '{' )
def getListFromExpression( self, name ):
""" Get a list of strings from a given expression
"""
tempString = self.get_expression( name ).strip()
listMode = False
if tempString.startswith('{'):
tempString = tempString[1:-1]
listMode = True
tempString = tempString.replace( " ", "" ).replace( '\n','' )
if tempString.find('{') < 0:
if not listMode:
tempString = tempString.replace( "\"", "" )
return tempString.split( ',' )
resultList = []
while tempString:
if tempString.find( '{' ) == 0 :
end = tempString.find( '}' )
resultList.append(tempString[:end+1])
tempString = tempString[end+1:]
if tempString.startswith(','):
tempString = tempString[1:]
elif tempString.find( '"' ) == 0 :
end = tempString[1:].find( '"' )
resultList.append( tempString[1:end+1] )
tempString = tempString[end+2:]
if tempString.startswith(','):
tempString = tempString[1:]
else:
end = tempString.find( ',' )
if end < 0:
resultList.append( tempString.replace( "\"", "" ).replace( " ", "" ) )
break
else:
resultList.append( tempString[:end].replace( "\"", "" ).replace( " ", "" ) )
tempString = tempString[end+1:]
return resultList
def getDictionaryFromSubJDL( self, name ):
""" Get a dictionary of the JDL attributes from a subsection
"""
tempList = self.get_expression( name )[1:-1]
resDict = {}
for item in tempList.split( ';' ):
if len( item.split( '=' ) ) == 2:
resDict[item.split( '=' )[0].strip()] = item.split( '=' )[1].strip().replace( '"', '' )
else:
return {}
return resDict
def deleteAttribute( self, name ):
"""Delete a named attribute
"""
if self.contents.has_key( name ):
del self.contents[name]
return 1
else:
return 0
def isOK( self ):
"""Check the JDL validity - to be defined
"""
if self.contents:
return 1
else:
return 0
def asJDL( self ):
"""Convert the JDL description into a string
"""
result = ''
for name, value in self.contents.items():
if value[0:1] == "{":
result = result + 4 * ' ' + name + " = \n"
result = result + 8 * ' ' + '{\n'
strings = value[1:-1].split( ',' )
for st in strings:
result = result + 12 * ' ' + st.strip() + ',\n'
result = result[:-2] + '\n' + 8 * ' ' + '};\n'
elif value[0:1] == "[":
tempad = ClassAd( value )
tempjdl = tempad.asJDL() + ';'
lines = tempjdl.split( '\n' )
result = result + 4 * ' ' + name + " = \n"
for line in lines:
result = result + 8 * ' ' + line + '\n'
else:
result = result + 4 * ' ' + name + ' = ' + str( value ) + ';\n'
return "[ \n" + result[:-1] + "\n]"
def getAttributeString( self, name ):
""" Get String type attribute value
"""
value = ''
if self.lookupAttribute( name ):
value = self.get_expression( name ).replace( '"', '' )
return value
def getAttributeInt( self, name ):
""" Get Integer type attribute value
"""
value = 0
if self.lookupAttribute( name ):
try:
value = int( self.get_expression( name ).replace( '"', '' ) )
except Exception:
value = 0
return value
def getAttributeBool( self, name ):
""" Get Boolean type attribute value
"""
if self.lookupAttribute( name ):
value = self.get_expression( name ).replace( '"', '' )
else:
return False
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
return False
def getAttributeFloat( self, name ):
""" Get Float type attribute value
"""
value = 0.0
if self.lookupAttribute( name ):
try:
value = float( self.get_expression( name ).replace( '"', '' ) )
except Exception:
value = 0.0
return value
def getAttributes( self ):
""" Get the list of all the attribute names
:return: list of names as strings
"""
return self.contents.keys()
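# Illustrative usage sketch (not part of the original module): a minimal JDL
# string is parsed and a few attributes are read back.  The attribute names
# and values are examples only.
if __name__ == "__main__":
  exampleJDL = '[ Executable = "my_job.sh"; CPUTime = 3600; ]'
  classAd = ClassAd( exampleJDL )
  print classAd.getAttributeString( 'Executable' )  # my_job.sh
  print classAd.getAttributeInt( 'CPUTime' )        # 3600
  print classAd.asJDL()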
|
gpl-3.0
| -481,757,788,879,941,700
| 26.027439
| 95
| 0.525496
| false
| 3.863993
| false
| false
| false
|
google-research/robel
|
robel/scripts/check_mujoco_deps.py
|
1
|
1290
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks if the given MuJoCo XML file has valid dependencies.
Example usage:
python -m robel.scripts.check_mujoco_deps path/to/mujoco.xml
"""
import argparse
import logging
import os
from robel.utils.resources import AssetBundle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', nargs=1, help='The MuJoCo XML to parse.')
args = parser.parse_args()
model_path = args.path[0]
if not os.path.exists(model_path):
raise ValueError('Path does not exist: ' + model_path)
logging.basicConfig(level=logging.INFO)
with AssetBundle(dry_run=True, verbose=True) as bundle:
bundle.add_mujoco(model_path)
if __name__ == '__main__':
main()
|
apache-2.0
| -2,726,141,442,768,360,400
| 29
| 74
| 0.723256
| false
| 3.696275
| false
| false
| false
|
shgo/baixa_camara
|
obter_inteiro_teor.py
|
1
|
7140
|
#!/usr/bin/python3
#-*- encoding: utf-8 -*-
#Copyright (C) 2016 Saullo Oliveira
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Iterates over the previously stored bills (proposições), fetches the full text
(inteiro teor) of each one, and processes it.
"""
__author__ = "Saullo Oliveira"
__copyright__ = "Copyright 2016"
__credits__ = ["Saullo Oliveira"]
__license__ = "GPLv3"
__version__ = "0.1"
__maintainer__ = "Saullo Oliveira"
__email__ = "shgo@dca.fee.unicamp.br"
__status__ = "Development"
from io import StringIO
import os.path
import argparse
import pickle as pkl
import urllib.request
import urllib.parse
import logging
import re
import magic
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from docx import Document
def get_inteiro_teor(prop):
"""
    Fetches the full text (inteiro teor) of prop and tokenizes it.
Args:
prop (Proposicao)
"""
print('{}\tObtendo inteiro teor da proposição {}'.format(
prop.ano, prop.id_))
print(prop.link_inteiro_teor)
    # if the full text has already been collected, do nothing
if hasattr(prop, 'inteiro_teor'):
return prop
    # in case there is no (valid) link to the full text
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
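    # e.g. (illustrative only):
    #   regex.match('http://www.camara.gov.br/prop.pdf')  -> match object
    #   regex.match('sem-link')                           -> None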
if not regex.match(prop.link_inteiro_teor):
logging.warning('MISSING - %s não tem link para inteiro teor.\n',
prop.id_)
return prop
arquivo = urllib.request.urlretrieve(prop.link_inteiro_teor)
with open(arquivo[0], 'rb') as arq:
cabecalho = magic.from_file(arquivo[0])
texto = ''
try:
if cabecalho.startswith(b'PDF'):
parser = PDFParser(arq)
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
doc.initialize()
rsrcmgr = PDFResourceManager()
output = StringIO()
converter = TextConverter(rsrcmgr, output, laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, converter)
print('\t\tprocessando páginas')
for page in doc.get_pages():
interpreter.process_page(page)
texto = output.getvalue()
elif cabecalho.startswith(b'Com'):
document = Document(arq)
print('\t\tprocessando paragrafos')
                for paragraph in document.paragraphs:
texto += paragraph.text
else:
raise Exception('Formato desconhecido')
print('\t\ttokenizando')
prop.inteiro_teor = re.split(r'\W+', texto)
except:
logging.warning('CORRUPT: %s arquivo corrupto! Oferecer dinheiro!',
prop.id_)
logging.warning(prop.link_inteiro_teor)
nome = 'inteiro_teor/inteiro_teor_{}.doc'.format(prop.id_)
with open(nome, 'wb') as salvar:
salvar.write(arq.read())
logging.warning('arquivo salvo em %s\n', nome)
return prop
def main():
    # handle the command-line arguments
parser = argparse.ArgumentParser(
description="""Baixa e processa os arquivos com inteiro teor para
cada proposição de lei no arquivo correspondente aos
parâmtros. As proposições que não forem processadas por
qualquer motivo (arquivo corrupto (hahaha), ou sem link)
estarão listadas no log, e os arquivos se corruptos,
serão baixados para a pasta inteiro_teor.""",
epilog="""Ex. de uso: para baixar o inteiro teor do arquivo
down_files/prop_props_PL_2016_apens_True.pkl:
./obter_inteiro_teor.py -anos 2016 -tipos PL -apensadas""")
parser.add_argument('-anos', type=int, action='append', nargs='*',
help="""anos das proposições já baixadas sem inteiro
teor.""")
parser.add_argument('-tipos', type=str, nargs='*',
help="""tipos de proposição já baixadas sem inteiro teor.""")
parser.add_argument('-apensadas', action='store_true',
help="""indica se o arquivo das proposições já baixadas
contém apensadas ou não. Útil para encontrar o
arquivo correto.""")
args = vars(parser.parse_args())
licensa = ("baixa_camara Copyright (C) 2016 Saullo Oliveira\n"
"This program comes with ABSOLUTELY NO WARRANTY;\n"
"This is free software, and you are welcome to redistribute it\n"
"under certain conditions; See COPYING file for more"
"information.\n"
"Type ENTER to continue...")
print(licensa)
input()
apens = args['apensadas']
for tp in args['tipos']:
for ano in args['anos'][0]:
print('Tipo {} ano {}.'.format(tp, ano))
logging.basicConfig(filename="logs/warnings_{}_{}.log".format(tp,
ano),
level=logging.WARNING)
if os.path.isfile('down_files/prop_props_{}_{}_apens{}.pkl'\
.format(tp, ano, apens)):
with open('down_files/prop_props_{}_{}_apens_{}.pkl'\
.format(tp, ano, apens), 'rb')\
as arq_prop:
print('Processando {}-{}'.format(tp, ano))
props = pkl.load(arq_prop)
props = [get_inteiro_teor(prop) for prop in props]
with open('down_files/prop_props_{}_{}_apens_{}.pkl'\
.format(tp, ano, apens), 'wb')\
as arq_prop:
print('Salvando {}-{}'.format(tp, ano))
pkl.dump(props, arq_prop)
else:
print(("\tarquivo não encontrado. Você já rodou o script "
"obter_proposicoes.py?"))
if __name__ == '__main__':
main()
|
gpl-3.0
| -8,356,535,699,134,562,000
| 41.795181
| 103
| 0.562922
| false
| 3.555556
| false
| false
| false
|
StartTheShift/thunderdome
|
thunderdome/properties.py
|
1
|
13707
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import copy
from datetime import datetime
from decimal import Decimal as D
import re
import time
import warnings
from uuid import uuid1, uuid4
from uuid import UUID as _UUID
from thunderdome.exceptions import ValidationError
# Saving strategies for thunderdome. These are used to indicate when a property
# should be saved after the initial vertex/edge creation.
#
# SAVE_ONCE - Only save this value once. If it changes throw an exception.
# SAVE_ONCHANGE - Only save this value if it has changed.
# SAVE_ALWAYS - Save this value every time the corresponding model is saved.
SAVE_ONCE = 1
SAVE_ONCHANGE = 2
SAVE_ALWAYS = 3
class BaseValueManager(object):
"""
Value managers are used to manage values pulled from the database and
track state changes.
"""
def __init__(self, instance, column, value):
"""
Initialize the value manager.
:param instance: An object instance
:type instance: mixed
:param column: The column to manage
:type column: thunder.columns.Column
:param value: The initial value of the column
:type value: mixed
"""
self._create_private_fields()
self.instance = instance
self.column = column
self.previous_value = value
self.value = value
def _create_private_fields(self):
self._previous_value = None
@property
def previous_value(self):
return self._previous_value
@previous_value.setter
def previous_value(self, val):
self._previous_value = copy.copy(val)
@property
def deleted(self):
"""
Indicates whether or not this value has been deleted.
:rtype: boolean
"""
return self.value is None and self.previous_value is not None
@property
def changed(self):
"""
Indicates whether or not this value has changed.
:rtype: boolean
"""
return self.value != self.previous_value
def getval(self):
"""Return the current value."""
return self.value
def setval(self, val):
"""
Updates the current value.
:param val: The new value
:type val: mixed
"""
self.value = val
def delval(self):
"""Delete a given value"""
self.value = None
def get_property(self):
"""
Returns a value-managed property attributes
:rtype: property
"""
_get = lambda slf: self.getval()
_set = lambda slf, val: self.setval(val)
_del = lambda slf: self.delval()
if self.column.can_delete:
return property(_get, _set, _del)
else:
return property(_get, _set)
class Column(object):
"""Base class for column types"""
value_manager = BaseValueManager
instance_counter = 0
def __init__(self,
primary_key=False,
index=False,
db_field=None,
default=None,
required=False,
save_strategy=None):
"""
Initialize this column with the given information.
:param primary_key: Indicates whether or not this is primary key
:type primary_key: boolean
:param index: Indicates whether or not this field should be indexed
:type index: boolean
:param db_field: The fieldname this field will map to in the database
:type db_field: str
:param default: Value or callable with no args to set default value
:type default: mixed or callable
:param required: Whether or not this field is required
:type required: boolean
:param save_strategy: Strategy used when saving the value of the column
:type save_strategy: int
"""
self.primary_key = primary_key
self.index = index
self.db_field = db_field
self.default = default
self.required = required
self.save_strategy = save_strategy
#the column name in the model definition
self.column_name = None
self.value = None
#keep track of instantiation order
self.position = Column.instance_counter
Column.instance_counter += 1
def validate(self, value):
"""
Returns a cleaned and validated value. Raises a ValidationError
if there's a problem
"""
if value is None:
if self.has_default:
return self.get_default()
elif self.required:
raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))
return value
def to_python(self, value):
"""
Converts data from the database into python values
raises a ValidationError if the value can't be converted
"""
return value
def to_database(self, value):
"""
Converts python value into database value
"""
if value is None and self.has_default:
return self.get_default()
return value
@property
def has_default(self):
"""
Indicates whether or not this column has a default value.
:rtype: boolean
"""
return self.default is not None
@property
def has_save_strategy(self):
"""
Indicates whether or not this column has a save strategy.
:rtype: boolean
"""
return self.save_strategy is not None
@property
def can_delete(self):
return not self.primary_key
def get_save_strategy(self):
"""
Returns the save strategy attached to this column.
:rtype: int or None
"""
return self.save_strategy
def get_default(self):
"""
Returns the default value for this column if one is available.
:rtype: mixed or None
"""
if self.has_default:
if callable(self.default):
return self.default()
else:
return self.default
def set_column_name(self, name):
"""
Sets the column name during document class construction This value will
be ignored if db_field is set in __init__
:param name: The name of this column
:type name: str
"""
self.column_name = name
@property
def db_field_name(self):
"""Returns the name of the thunderdome name of this column"""
return self.db_field or self.column_name
class String(Column):
def __init__(self, *args, **kwargs):
required = kwargs.get('required', False)
self.min_length = kwargs.pop('min_length', 1 if required else None)
self.max_length = kwargs.pop('max_length', None)
self.encoding = kwargs.pop('encoding', 'utf-8')
if 'default' in kwargs and isinstance(kwargs['default'], basestring):
kwargs['default'] = kwargs['default'].encode(self.encoding)
        super(String, self).__init__(*args, **kwargs)
def validate(self, value):
        # Make sure the value gets encoded properly
if isinstance(value, unicode):
value = value.encode(self.encoding)
        value = super(String, self).validate(value)
if value is None:
return None
if not isinstance(value, basestring) and value is not None:
raise ValidationError('{} is not a string'.format(type(value)))
if self.max_length:
if len(value) > self.max_length:
raise ValidationError('{} is longer than {} characters'.format(self.column_name, self.max_length))
if self.min_length:
if len(value) < self.min_length:
raise ValidationError('{} is shorter than {} characters'.format(self.column_name, self.min_length))
return value
Text = String
class Integer(Column):
def validate(self, value):
val = super(Integer, self).validate(value)
if val is None:
return
try:
return long(val)
except (TypeError, ValueError):
raise ValidationError("{} can't be converted to integral value".format(value))
def to_python(self, value):
if value is not None:
return long(value)
def to_database(self, value):
value = super(Integer, self).to_database(value)
if value is not None:
return long(value)
class DateTime(Column):
def __init__(self, strict=True, **kwargs):
"""
Initialize date-time column with the given settings.
:param strict: Whether or not to attempt to automatically coerce types
:type strict: boolean
"""
self.strict = strict
super(DateTime, self).__init__(**kwargs)
def to_python(self, value):
if isinstance(value, datetime):
return value
return datetime.fromtimestamp(float(value))
def to_database(self, value):
value = super(DateTime, self).to_database(value)
if value is None:
return
if not isinstance(value, datetime):
if not self.strict and isinstance(value, (basestring, int, float)):
value = datetime.fromtimestamp(float(value))
else:
raise ValidationError("'{}' is not a datetime object".format(value))
tmp = time.mktime(value.timetuple()) # gives us a float with .0
# microtime is a 6 digit int, so we bring it down to .xxx and add it to the float TS
tmp = tmp + float(value.microsecond) / 1000000
return tmp
class UUID(Column):
"""Universally Unique Identifier (UUID) type - UUID4 by default"""
re_uuid = re.compile(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
def __init__(self, default=lambda: str(uuid4()), **kwargs):
super(UUID, self).__init__(default=default, **kwargs)
def validate(self, value):
val = super(UUID, self).validate(value)
if val is None:
return None # if required = False and not given
if not self.re_uuid.match(str(val)):
raise ValidationError("{} is not a valid uuid".format(value))
return val
def to_python(self, value):
val = super(UUID, self).to_python(value)
return str(val)
def to_database(self, value):
val = super(UUID, self).to_database(value)
if val is None:
return
return str(val)
class Boolean(Column):
def to_python(self, value):
return bool(value)
def to_database(self, value):
val = super(Boolean, self).to_database(value)
return bool(val)
class Double(Column):
def __init__(self, **kwargs):
self.db_type = 'double'
super(Double, self).__init__(**kwargs)
def validate(self, value):
val = super(Double, self).validate(value)
if val is None:
return None # required = False
try:
return float(value)
except (TypeError, ValueError):
raise ValidationError("{} is not a valid double".format(value))
def to_python(self, value):
if value is not None:
return float(value)
def to_database(self, value):
value = super(Double, self).to_database(value)
if value is not None:
return float(value)
class Float(Double):
    """Float class for backwards compatibility / if you really want to"""
def __init__(self, **kwargs):
warnings.warn("Float type is deprecated. Please use Double.",
category=DeprecationWarning)
super(Float, self).__init__(**kwargs)
class Decimal(Column):
def to_python(self, value):
val = super(Decimal, self).to_python(value)
if val is not None:
return D(val)
def to_database(self, value):
val = super(Decimal, self).to_database(value)
if val is not None:
return str(val)
class Dictionary(Column):
def validate(self, value):
val = super(Dictionary, self).validate(value)
if val is None:
return None # required = False
if not isinstance(val, dict):
raise ValidationError('{} is not a valid dict'.format(val))
return val
class List(Column):
def validate(self, value):
val = super(List, self).validate(value)
if val is None:
return None # required = False
if not isinstance(val, (list, tuple)):
raise ValidationError('{} is not a valid list'.format(val))
return val
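# Illustrative usage sketch (not part of the original module): exercising a
# few column types directly, outside of any vertex or edge model, under the
# Python 2 interpreter this module targets.  The column names and values are
# invented for demonstration.
if __name__ == '__main__':
    name_col = String(max_length=16, required=True)
    name_col.set_column_name('name')
    print(name_col.validate('Bob'))              # passes the length checks
    age_col = Integer(default=0)
    print(age_col.to_database(None))             # falls back to the default, 0
    created = DateTime()
    print(created.to_database(datetime.now()))   # float unix timestamp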
|
mit
| -7,597,682,304,716,384,000
| 28.540948
| 115
| 0.607208
| false
| 4.372249
| false
| false
| false
|
tps12/freezing-shame
|
freezing/shame/tests/views/test_cart.py
|
1
|
5789
|
from django.test import TestCase
class CartTest(TestCase):
from django.test.client import Client
Client = staticmethod(Client)
from shame.models import Store
Store = staticmethod(Store)
from shame.models import Product
Product = staticmethod(Product)
from xml.etree import ElementTree
ElementTree = staticmethod(ElementTree)
def test_addtocart(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
self.assertLess(response.status_code, 400)
def test_addrequiressku(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'notasku': product.sku },
HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_addrequiresvalidsku(self):
from uuid import uuid4
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().post(
'/cart',
{ 'sku': uuid4() },
HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_productinstore(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
store = self.Store(subdomain='another-store')
store.save()
response = self.Client().post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='another-store.example.biz')
self.assertEqual(response.status_code, 400)
def test_showcart(self):
store = self.Store(subdomain='the-store')
store.save()
response = self.Client().get('/cart', HTTP_HOST='the-store.example.biz')
self.assertEqual(response.status_code, 200)
def test_hasnewcontents(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
client = self.Client()
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'Thingy', response.content)
def test_pricesandtotals(self):
store = self.Store(subdomain='the-store')
store.save()
a = self.Product(store=store, name='Thing A', price=123)
a.save()
b = self.Product(store=store, name='Thing B', price=456)
b.save()
client = self.Client()
for product in a, a, b:
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'$1.23', response.content)
self.assertIn(b'$2.46', response.content) # == 2 * 1.23
self.assertIn(b'$4.56', response.content)
self.assertIn(b'$7.02', response.content) # == 2 * 1.23 + 4.56
def test_onecartperstore(self):
store1 = self.Store(subdomain='the-store')
store1.save()
a = self.Product(store=store1, name='Thing A', price=123)
a.save()
store2 = self.Store(subdomain='another-store')
store2.save()
b = self.Product(store=store2, name='Thing B', price=456)
b.save()
client = self.Client()
for store, product in (store1, a), (store1, a), (store2, b):
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='{}.example.biz'.format(store))
response = client.get(
'/cart',
HTTP_HOST='the-store.example.biz')
self.assertIn(b'$1.23', response.content)
self.assertNotIn(b'$4.56', response.content)
response = client.get(
'/cart',
HTTP_HOST='another-store.example.biz')
self.assertNotIn(b'$1.23', response.content)
self.assertIn(b'$4.56', response.content)
def test_nocheckoutifempty(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
response = self.Client().get('/cart', HTTP_HOST='the-store.example.biz')
for form in self.ElementTree.fromstring(response.content).iter('form'):
if form.attrib['action'].endswith('/checkout'):
self.fail()
def test_checkoutbutton(self):
store = self.Store(subdomain='the-store')
store.save()
product = self.Product(store=store, name='Thingy', price=123)
product.save()
client = self.Client()
client.post(
'/cart',
{ 'sku': product.sku },
HTTP_HOST='the-store.example.biz')
response = client.get('/cart', HTTP_HOST='the-store.example.biz')
for form in self.ElementTree.fromstring(response.content).iter('form'):
if form.attrib['action'].endswith('/checkout'):
self.assertEqual(form.attrib['method'], 'POST')
break
else:
self.fail()
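# Illustrative invocation sketch (not part of the original module): these
# tests are meant for Django's test runner; the dotted module path below is
# an assumption based on the file location.
#
#   $ python manage.py test shame.tests.views.test_cart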
|
gpl-3.0
| 6,249,128,531,331,405,000
| 29.308901
| 80
| 0.564346
| false
| 3.849069
| true
| false
| false
|
isaacyeaton/global-dyn-non-equil-gliding
|
Code/script_airfoil_snake.py
|
1
|
18626
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 14 14:16:45 2014
%reset -f
%clear
%pylab
%load_ext autoreload
%autoreload 2
@author: isaac
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
from scipy.io import loadmat
# setup better plots
import plots
reload(plots)
from plots import bmap, rcj, tl
import eqns
reload(eqns)
# %% Load in the provided data
data = loadmat('Data/Holden2014/aero_data_mod360.mat')
# get out the data
Clall = data['C_lift'].flatten()
Cdall = data['C_drag'].flatten()
alphaall = data['alpha'].flatten()
# %% "raw" experimental data
idx_exp = np.where((alphaall >= -np.deg2rad(12)) &
(alphaall <= np.deg2rad(61)))[0]
ale = alphaall[idx_exp]
Cle = Clall[idx_exp]
Cde = Cdall[idx_exp]
ClCde = Cle / Cde
#Clprimee = np.gradient(Cle, np.deg2rad(5))
#Cdprimee = np.gradient(Cde, np.deg2rad(5))
Cl_fun = UnivariateSpline(ale, Cle, k=1, s=0)
Cd_fun = UnivariateSpline(ale, Cde, k=1, s=0)
#ClCd_fun = interp1d(ale, ClCde, bounds_error=False)
Clprime_fun = Cl_fun.derivative()
Cdprime_fun = Cd_fun.derivative()
Clprimee = Clprime_fun(ale)
Cdprimee = Cdprime_fun(ale)
# %% "valid" region where date was recorded (-10 to 60 deg aoa)
idx_fit = np.where((alphaall >= -np.deg2rad(12)) &
(alphaall <= np.deg2rad(61)))[0] # was 91
alf = alphaall[idx_fit]
Clf = Clall[idx_fit]
Cdf = Cdall[idx_fit]
ClCdf = Clf / Cdf
#Clprimef = np.gradient(Clf, 5)
#Cdprimef = np.gradient(Cdf, 5)
#s = .005
s = .0001
cl_fun = UnivariateSpline(alf, Clf, s=s, k=2)
cd_fun = UnivariateSpline(alf, Cdf, s=s, k=2)
clprime_fun = cl_fun.derivative()
cdprime_fun = cd_fun.derivative()
# numerically evaluate the spline
al = np.linspace(alf[0], alf[-1], 500)
cl = cl_fun(al)
cd = cd_fun(al)
clprime = clprime_fun(al)
cdprime = cdprime_fun(al)
clcd = cl / cd
# %% Cl, Cd, and ClCd curves for paper (updated)
fig, ax = plt.subplots()
ax.axvline(0, color='gray', lw=1)
ax.axhline(0, color='gray', lw=1)
ax.plot(np.rad2deg(alf), Clf, 'o', ms=6, label=r'$C_L$')
ax.plot(np.rad2deg(alf), Cdf, 's', ms=6, label=r'$C_D$')
ax.plot(np.rad2deg(alf), ClCdf, '^', ms=6, label=r'$C_L/C_D$')
ax.plot(np.rad2deg(al), cl, color=bmap[0], lw=1.5)
ax.plot(np.rad2deg(al), cd, color=bmap[1], lw=1.5)
ax.plot(np.rad2deg(al), clcd, color=bmap[2], lw=1.5)
ax.set_xlim(-15, 65)
ax.set_ylim(-2, 3)
ax.legend(loc='lower right', frameon=False, fontsize=18)
ax.set_xlabel(r'$\alpha$', fontsize=18)
ax.set_ylabel('force coefficients', fontsize=18)
fig.canvas.draw()
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
text = tick.get_text()
newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
ax.text(5, 2.5, 'airfoil snake', {'fontsize': 18})
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure4b_airfoil_snake.pdf', transparent=True)
# %% Intersections with spline data (for paper about pitch effects)
gamma = al
cgamma = 1 / np.tan(gamma)
pitch_array = np.deg2rad(np.array([-10, 10]))
_gamma_equil = np.deg2rad(np.linspace(10, 70, 1000))
fig, ax = plt.subplots()
ax.plot(np.rad2deg(gamma[gamma > 0]), cgamma[gamma > 0], c=bmap[2], lw=2,
label=r'$\cot{\gamma}$')
for idx, pitch in enumerate(pitch_array):
alpha = gamma + pitch
drag = cd_fun(alpha)
lift = cl_fun(alpha)
ratio = lift / drag
goodidx = np.where((alpha > al[0]) & (alpha < al[-1]))[0]
lb_txt = r'$\theta = {:.0f}$'.format(np.rad2deg(pitch))
lb_txt = lb_txt + u'\u00B0'
_ln, = ax.plot(np.rad2deg(gamma[goodidx]), ratio[goodidx], lw=2,
label=lb_txt, c=bmap[idx])
# find equilibrium points
peq, geq = eqns.pitch_bifurcation([pitch], _gamma_equil, cl_fun, cd_fun,
angle_rng=(al[0], al[-1])).T
aeq = peq + geq
ratio_eq = cl_fun(aeq) / cd_fun(aeq)
_c = _ln.get_color()
ax.plot(np.rad2deg(geq), ratio_eq, 'o', c=_c, mec=_c, ms=9)
# for i in range(len(geq)):
# ax.axvline(np.rad2deg(geq[i]), color=_c)
leg = ax.legend(loc='upper right', frameon=False, fontsize=18)
#ax.set_xlim(np.deg2rad(np.r_[-10, 90]))
ax.set_xlim(0, 60)
ax.set_ylim(0, 3)
ax.set_xlabel(r'$\gamma$, glide angle', fontsize=18)
ax.set_ylabel(r'$C_L/C_D(\gamma + \theta)$', fontsize=18)
fig.canvas.draw()
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
text = tick.get_text()
newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure2_effect_of_pitch.pdf', transparent=True)
# %% Find the glide angle and velocity at equilibrium (pitch of 0 deg)
peq, geq = eqns.pitch_bifurcation([0], _gamma_equil, cl_fun, cd_fun,
angle_rng=(al[0], al[-1])).T
peq, geq = float(peq), float(geq)
veq = eqns.v_equil(geq, cl_fun, cd_fun)
vxeq, vzeq = eqns.vxvz_equil(veq, geq)
cleq, cdeq = cl_fun(geq), cd_fun(geq)
assert np.allclose(np.arctan(cdeq / cleq), geq)
# %% Find equilibrium points
pitches = np.deg2rad(np.linspace(-25, 25, 4000))
gammas = np.deg2rad(np.linspace(10, 70, 1000))
sn_angle_rng = (al[0], al[-1])
sn_equil_exp = eqns.pitch_bifurcation(pitches, gammas, Cl_fun, Cd_fun,
angle_rng=sn_angle_rng)
sn_equil_spl = eqns.pitch_bifurcation(pitches, gammas, cl_fun, cd_fun,
angle_rng=sn_angle_rng)
# %% Classify the stability of fixed points
sn_td_exp, sn_ev_exp = eqns.tau_delta(sn_equil_exp, Cl_fun, Cd_fun,
Clprime_fun, Cdprime_fun,
angle_rng=sn_angle_rng)
sn_td_spl, sn_ev_spl = eqns.tau_delta(sn_equil_spl, cl_fun, cd_fun,
clprime_fun, cdprime_fun,
angle_rng=sn_angle_rng)
# %% Classification of fixed points
sn_nuni_exp, sn_uni_exp, sn_class_exp = eqns.classify_fp(sn_td_exp)
sn_nuni_spl, sn_uni_spl, sn_class_spl = eqns.classify_fp(sn_td_spl)
possible_class = ['saddle point', 'unstable focus', 'unstable node',
'stable focus', 'stable node']
bfbmap = [bmap[0], bmap[4], bmap[2], bmap[3], bmap[1]]
# %% Acceleration along terminal manifold when we have a saddle point
sad_idx = np.where(sn_class_spl == 'saddle point')[0]
sad_pitch, sad_gamma = sn_equil_spl[sad_idx].T
# we have some double saddle points below theta =2 deg; remove these
sad_idx = np.where(sad_pitch >= np.deg2rad(2))[0]
sad_pitch, sad_gamma = sad_pitch[sad_idx], sad_gamma[sad_idx]
sad_aoa = sad_pitch + sad_gamma
dcl_fun = cl_fun.derivative()
ddcl_fun = dcl_fun.derivative()
dcd_fun = cd_fun.derivative()
ddcd_fun = dcd_fun.derivative()
# 2nd order spline, needs more to get higher derivatives
#dddcl_fun = ddcl_fun.derivative()
#dddcd_fun = ddcd_fun.derivative()
# evaluate force coefficients at the saddle
sad_cl = cl_fun(sad_aoa)
sad_dcl = dcl_fun(sad_aoa)
sad_ddcl = ddcl_fun(sad_aoa)
sad_ddcl = np.zeros_like(sad_aoa)
sad_cd = cd_fun(sad_aoa)
sad_dcd = dcd_fun(sad_aoa)
sad_ddcd = ddcd_fun(sad_aoa)
sad_dddcd = np.zeros_like(sad_aoa)
# place the values in a large array for export
sad_angles = np.c_[np.rad2deg(sad_pitch), sad_pitch, sad_gamma, sad_aoa]
sad_lift = np.c_[sad_cl, sad_dcl, sad_ddcl, sad_ddcl]
sad_drag = np.c_[sad_cd, sad_dcd, sad_ddcd, sad_ddcd]
sad_export = np.c_[sad_angles, sad_lift, sad_drag]
# %%
# save the data
import pandas as pd
node_idx = np.where(sn_class_spl == 'stable node')[0]
node_pitch, node_gamma = sn_equil_spl[node_idx].T
# nodes, select ones with saddles
node_idx_with_saddles = np.where(np.in1d(node_pitch, sad_pitch))[0]
node_pitch = node_pitch[node_idx_with_saddles]
node_gamma = node_gamma[node_idx_with_saddles]
# do the reverse to ensure we have the same number of values
sad_idx_with_nodes = np.where(np.in1d(sad_pitch, node_pitch))[0]
# too many indices...
node_idx_with_saddles = []
for i in np.arange(len(sad_pitch)):
s_pitch = sad_pitch[i]
idx = np.where(node_pitch == s_pitch)[0]
if len(idx) == 0:
continue
elif len(idx) == 1:
node_idx_with_saddles.append(idx)
elif len(idx) > 1:
for ii in np.arange(len(idx)):
node_idx_with_saddles.append(idx[ii])
node_idx_with_saddles = np.array(node_idx_with_saddles)
# %% Spline bifurcation plot (deg) for paper
rd = np.rad2deg
gam_high = sn_angle_rng[0] - pitches # closer to 0
gam_low = sn_angle_rng[1] - pitches # closer to 90
fig, ax = plt.subplots()
ax.fill_between(rd(pitches), rd(gam_high), 0, color='gray', alpha=.1, lw=0)
ax.fill_between(rd(pitches), rd(gam_low), 60, color='gray', alpha=.1, lw=0)
ax.axvline(0, color='gray')
ax.axvline(5, color='gray')
for ii, fp_kind in enumerate(possible_class):
idx = np.where(sn_class_spl == fp_kind)[0]
if len(idx) > 0:
ax.plot(rd(sn_equil_spl[idx, 0]), rd(sn_equil_spl[idx, 1]), 'o',
c=bfbmap[ii], ms=2.5, label=fp_kind)
_leg = ax.legend(loc='lower left', markerscale=3, fancybox=True, framealpha=.75,
frameon=True, fontsize=16)
_leg.get_frame().set_color('w')
ax.set_xlim(-15, 15)
ax.set_ylim(60, 0)
#ax.set_ylabel(r'$\gamma^*$, equilibrium glide angle', fontsize=18)
#ax.set_xlabel(r'$\theta$, pitch angle', fontsize=18)
ax.set_ylabel(r'$\gamma^*$ ', fontsize=18, rotation=0)
ax.set_xlabel(r'$\theta$', fontsize=18)
ax.text(-13, 5, 'airfoil snake', {'fontsize': 18})
fig.canvas.draw()
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure6b_bifurcation_airfoil_snake.pdf',
transparent=True)
# %% Velocity polar diagram, pitch = 0
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
pitch = 0
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$0' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bi_vpd0_airfoil_snake.pdf', transparent=True)
# %% Velocity polar diagram, pitch = 0 with Z nullcline
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
pitch = 0
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, nullcline_z=True,
fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$0' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bi_vpd0_nullcline_airfoil_snake.pdf',
transparent=True)
# %% Velocity polar diagram, pitch = 5
pitch = np.deg2rad(5)
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
lab = 'airfoil snake, ' + r'$\theta=$5' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5bii_vpd5_airfoil_snake.pdf', transparent=True)
# %% Velocity polar diagram, pitch = 5, with manifold approximations
man_folder = './Data/airfoil snake manifold/'
man_2 = np.genfromtxt(man_folder + 'manifold_2nd_order.csv', delimiter=',')
man_3 = np.genfromtxt(man_folder + 'manifold_3rd_order.csv', delimiter=',')
vx_2, vz_2 = man_2.T
vx_3, vz_3 = man_3.T
pitch = np.deg2rad(5)
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=None, ax=None)
ax.plot(vx_2, vz_2, c=bmap[2], label='2nd-order approx.')
ax.plot(vx_3, vz_3, c=bmap[3], label='3rd-order approx.')
ax.legend(loc='lower right', frameon=True)
ax.set_xlim(.55, .8)
ax.set_ylim(-.525, -.275)
fig.savefig('Figures/figure5bii_inset_vpd5_airfoil_snake.pdf',
transparent=False)
# %% Supplement figure - Acclerations along the terminal velocity manifold
gam_2 = -np.arctan(vz_2 / vx_2)
gam_3 = -np.arctan(vz_3 / vx_3)
ptc_2 = np.deg2rad(5)
ptc_3 = np.deg2rad(5)
aoa_2 = gam_2 + ptc_2
aoa_3 = gam_3 + ptc_3
cl_2 = cl_fun(aoa_2)
cd_2 = cd_fun(aoa_2)
cl_3 = cl_fun(aoa_3)
cd_3 = cd_fun(aoa_3)
ax_2, az_2 = eqns.cart_eqns(vx_2, vz_2, cl_2, cd_2)
ax_3, az_3 = eqns.cart_eqns(vx_3, vz_3, cl_3, cd_3)
a_2 = np.sqrt(ax_2**2 + az_2**2)
a_3 = np.sqrt(ax_3**2 + az_3**2)
xx_2 = np.arange(len(a_2))
xx_3 = np.arange(len(a_3))
# arbitrary shift the indices for plotting; saddle at zero, stable node at 1
xx_2 = (xx_2 - 150) / 150
xx_3 = (xx_3 - 150) / 150
fig, ax = plt.subplots()
ax.axhline(.1, color=bmap[3], lw=1, label='low acceleration contour')
ax.axvline(0, color=bmap[0], lw=1, ls='--', label='location of saddle point')
ax.axvline(.93, color=bmap[1], lw=1, ls='--', label='location of stable node')
ax.plot(xx_2, a_2, c=bmap[2], lw=2, label='2nd order approx.')
ax.plot(xx_3, a_3, c=bmap[3], lw=2, label='3rd order approx.')
ax.legend(loc='upper left', frameon=True)
ax.set_xlabel('distance along terminal velocity manifold')
ax.set_ylabel('acceleration magnitude')
rcj(ax)
tl(fig)
fig.savefig('Figures/figure_SI_acceleration_along_manifold.pdf',
transparent=True)
# %% Figure 1 - show how VPD differs from time series approach
pitch = 0
ts = np.linspace(0, 30, 351)
vxseed, vzseed = np.r_[.4], np.r_[0]
odeargs = (pitch, cl_fun, cd_fun)
for i in range(len(vxseed)):
x0 = (0, 0, vxseed[i], vzseed[i])
soln = plots.ps_traj(x0, ts, odeargs, eqns.cart_model, arng,
vxlim, vzlim)
ntime = len(ts)
# unpack values
xs, zs, vxs, vzs = soln.T
gs = eqns.calc_gamma(vxs, vzs)
# just plot once the glide angle derivative is slow
idx = np.where(np.abs(np.gradient(gs)) >= 1e-4)[0]
xs, zs = xs[idx], zs[idx]
vxs, vzs = vxs[idx], vzs[idx]
gs = gs[idx]
ts = ts[idx]
accxs, acczs = np.zeros(len(ts)), np.zeros(len(ts))
for k in np.arange(len(ts)):
x0 = (xs[k], zs[k], vxs[k], vzs[k])
_, _, accxs[k], acczs[k] = eqns.cart_model(x0, ts[k], odeargs)
vmag = np.sqrt(vxs**2 + vzs**2)
accmag = np.sqrt(accxs**2 + acczs**2)
i0 = gs.argmax()
np.where(accmag <= 0.1)[0]
i1 = 15
i2 = 139
i3 = 147
ii = np.r_[i1, i2, i3]
ii = np.r_[140] # , 147] # end of bump in acceleration
# %% Plot time histories
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, sharex=True, figsize=(4.2, 8))
ax1.axhline(0, color='gray')
ax2.axhline(0, color='gray')
ax3.axhline(0, color='gray')
ax4.axhline(gs[-1], color='gray', ls=':')
lw = 1.5
ax1.plot(ts, xs, 'k', lw=lw)
ax1.plot(ts, zs, 'k--', lw=lw)
ax2.plot(ts, vxs, 'k', lw=lw)
ax2.plot(ts, vzs, 'k--', lw=lw)
ax3.plot(ts, accxs, 'k', label='horizontal', lw=lw)
ax3.plot(ts, acczs, 'k--', label='vertical', lw=lw)
ax4.plot(ts, gs, 'k', lw=lw)
# plot velocity and acceleration magnitudes
# ax3.plot(ts, accmag, 'k:', label='magnitude', lw=lw)
# ax2.plot(ts, vmag, 'k:', lw=lw)
kwargs = dict(marker='o', ms=7, mfc=None, mec='gray', mew=1, fillstyle='none')
ax1.plot(ts[i0], xs[i0], 'o', ms=7, c='gray')
ax1.plot(ts[i0], zs[i0], 'o', ms=7, c='gray')
ax1.plot(ts[ii], xs[ii], **kwargs)
ax1.plot(ts[ii], zs[ii], **kwargs)
ax2.plot(ts[i0], vxs[i0], 'o', ms=7, c='gray')
ax2.plot(ts[i0], vzs[i0], 'o', ms=7, c='gray')
ax2.plot(ts[ii], vxs[ii], **kwargs)
ax2.plot(ts[ii], vzs[ii], **kwargs)
ax3.plot(ts[i0], accxs[i0], 'o', ms=7, c='gray')
ax3.plot(ts[i0], acczs[i0], 'o', ms=7, c='gray')
ax3.plot(ts[ii], accxs[ii], **kwargs)
ax3.plot(ts[ii], acczs[ii], **kwargs)
ax4.plot(ts[i0], gs[i0], 'o', ms=7, c='gray')
ax4.plot(ts[ii], gs[ii], **kwargs)
ax3.legend(loc='lower right', fontsize=18)
for ax in [ax1, ax2, ax3, ax4]:
ax.set_yticks([])
ax.set_xticks([])
ttext = .5
ax1.text(ttext, .9 * np.r_[xs, zs].max(), 'position', fontsize=18)
ax2.text(ttext, .9 * np.r_[vxs, vzs].max(), 'velocity', fontsize=18)
ax3.text(ttext, .9 * np.r_[accxs, acczs].max(), 'acceleration',
fontsize=18)
ax4.text(ttext, .85 * np.pi / 2, 'glide angle', fontsize=18)
ax4.set_xlabel('time', fontsize=18)
#ax1.set_ylabel('position', fontsize=18)
#ax2.set_ylabel('velocity', fontsize=18)
#ax3.set_ylabel('acceleration', fontsize=18)
#ax4.set_ylabel('glide angle', fontsize=18)
ax4.set_xlim(0, ts[-1])
ax4.set_ylim(0, np.pi / 2)
rcj(ax1)
rcj(ax2)
rcj(ax3)
rcj(ax4)
tl(fig)
fig.savefig('Figures/1abcd_time_histories.pdf', transparent=True)
# %% Plot x-z space
skip = 10
fig, ax = plt.subplots(figsize=(4.2, 4.))
ax.plot(xs, zs, 'k-x', lw=1.5, markevery=skip, mew=.75)
ax.plot(xs[i0], zs[i0], 'o', ms=7, c='gray')
ax.plot(xs[ii], zs[ii], **kwargs)
ax.set_xlabel(r'$x$', fontsize=20)
ax.set_ylabel(r'$z$ ', rotation=0, fontsize=20)
ax.set_yticks([])
ax.set_xticks([])
ax.set_aspect('equal', adjustable='box')
ax.margins(0, .03)
rcj(ax)
tl(fig)
fig.savefig('Figures/1e_position_space.pdf', transparent=True)
# %% Plot velocity polar diagram
afdict = dict(cli=cl_fun, cdi=cd_fun, clip=clprime_fun, cdip=cdprime_fun)
arng = sn_angle_rng
extrap = (ale[0], ale[-1])
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
tvec = np.linspace(0, 30, 351)
reload(plots)
from plots import phase_plotter as ppr
fig, ax = plt.subplots(figsize=(4.2, 4))
fig, ax = ppr(afdict, pitch, lims, arng, tvec, ngrid=201,
nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
timer=True, gamtest=gammas, extrap=extrap,
traj=plots.ps_traj_dp5, fig=fig, ax=ax, acc_contour=True)
ax.plot(vxs, vzs, 'kx-', lw=1.5, markevery=skip, mew=.75, ms=5)
ax.plot(vxs[i0], vzs[i0], 'o', ms=7, c='gray')
ax.plot(vxs[ii], vzs[ii], **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(r'$v_x$', fontsize=20)
ax.set_ylabel(r'$v_z$ ', fontsize=20, rotation=0)
fig.savefig('Figures/1f_velocity_space.pdf', transparent=True)
|
mit
| -5,088,139,220,984,201,000
| 26.925037
| 80
| 0.633953
| false
| 2.352065
| false
| false
| false
|
brain-tec/partner-contact
|
partner_multi_relation_tabs/tests/test_tab.py
|
1
|
1766
|
# Copyright 2018 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import common
from ..tablib import Tab
class TestTab(common.TestCommon):
def test_create_page(self):
self.assertTrue(bool(self.tab_board))
tab_obj = Tab(self.tab_board)
page = tab_obj.create_page()
        # And we should have a field for (amongst others) type_selection_id.
        field = page.xpath('//field[@name="type_selection_id"]')
        self.assertTrue(field, 'Field type_selection_id not in page.')
def test_visibility(self):
"""Tab positions should be shown for functionaries, but not others."""
self.assertTrue(bool(self.tab_positions))
self.assertTrue(bool(self.partner_important_person))
self.assertTrue(bool(self.partner_common_person))
tab_obj = Tab(self.tab_positions)
self.assertTrue(
tab_obj.compute_visibility(self.partner_important_person),
'Positions tab should be visible for functionary.')
self.assertFalse(
tab_obj.compute_visibility(self.partner_common_person),
'Positions tab should not be visible for non-functionary.')
# Tab for departments should only be visible for main partner
self.assertTrue(bool(self.tab_departments))
self.assertTrue(bool(self.partner_big_company))
tab_obj = Tab(self.tab_departments)
self.assertTrue(
tab_obj.compute_visibility(self.env.ref('base.main_partner')),
'Department tab should be visible for main partner.')
self.assertFalse(
tab_obj.compute_visibility(self.partner_big_company),
'Department tab should not be visible for other partners.')
|
agpl-3.0
| 360,778,801,671,219,900
| 44.282051
| 78
| 0.660815
| false
| 3.933185
| false
| false
| false
|
bb111189/CryptoKnocker
|
CryptoKnocker/CryptoKnocker/settings.py
|
1
|
2239
|
"""
Django settings for CryptoKnocker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+6itzn97vm^deyw1c!g8h(i(u1pu%fg-^_vj*kabc#t_lqbd-7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.db',
'django.forms',
'mainpage',
'management',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CryptoKnocker.urls'
WSGI_APPLICATION = 'CryptoKnocker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
MEDIA_ROOT = os.path.join(os.getcwd())
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(os.getcwd(), 'static'),
)
#view path
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
mit
| 2,499,307,004,532,393,000
| 23.075269
| 71
| 0.714605
| false
| 3.175887
| false
| false
| false
|
peragro/peragro-at
|
src/damn_at/metadatastore.py
|
1
|
2111
|
"""
The MetaDataStore handler.
"""
from __future__ import absolute_import
import os
from damn_at.utilities import is_existing_file, pretty_print_file_description
from damn_at.bld import hash_to_dir
from damn_at.serialization import SerializeThriftMsg, DeserializeThriftMsg
from damn_at import FileDescription
from io import open
class MetaDataStoreException(Exception):
"""Base MetaDataStore Exception"""
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.msg = msg
def __str__(self):
return repr(self.msg)
class MetaDataStoreFileException(MetaDataStoreException):
"""Something wrong with the file"""
pass
class MetaDataStore(object):
"""
A filesystem MetaDataStore implementation.
"""
def __init__(self, store_path):
self.store_path = store_path
if not os.path.exists(self.store_path):
os.makedirs(self.store_path)
def is_in_store(self, store_id, an_hash):
"""
Check if the given file hash is in the store.
"""
return is_existing_file(os.path.join(self.store_path, hash_to_dir(an_hash)))
def get_metadata(self, store_id, an_hash):
"""
Get the FileDescription for the given hash.
"""
try:
with open(os.path.join(self.store_path, hash_to_dir(an_hash)), 'rb') as metadata:
a_file_descr = DeserializeThriftMsg(FileDescription(), metadata.read())
return a_file_descr
except IOError as ioe:
raise MetaDataStoreFileException('Failed to open FileDescription with hash %s' % an_hash, ioe)
def write_metadata(self, store_id, an_hash, a_file_descr):
"""
Write the FileDescription to this store.
"""
data = SerializeThriftMsg(a_file_descr)
path = os.path.join(self.store_path, hash_to_dir(an_hash))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb') as metadata:
metadata.write(data)
return a_file_descr
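# A minimal usage sketch of the store API above; the store path, store id and
# hash value are illustrative assumptions only.
if __name__ == '__main__':
    store = MetaDataStore('/tmp/damn-metadata')
    an_hash = 'd41d8cd98f00b204e9800998ecf8427e'
    store.write_metadata('local', an_hash, FileDescription())
    print(store.is_in_store('local', an_hash))
    print(store.get_metadata('local', an_hash))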
|
bsd-3-clause
| 1,144,821,180,542,708,000
| 30.984848
| 106
| 0.636191
| false
| 3.703509
| false
| false
| false
|
moyaproject/moya
|
moya/settings.py
|
1
|
10368
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from .containers import OrderedDict
from . import iniparse
from .compat import text_type, string_types, PY2, implements_to_string, implements_bool
from . import errors
from .tools import textual_list
from fs.path import dirname, join, normpath, relpath
import io
import os
def read_settings(fs, path):
with fs.safeopen(path, "rb") as settings_file:
cfg = iniparse.parse(settings_file)
return cfg
@implements_to_string
class SettingsKeyError(KeyError):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class SettingsContainer(OrderedDict):
@classmethod
def apply_master(self, master, settings):
for section_name, section in master.items():
if section_name == "service":
continue
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
@classmethod
def read(cls, fs, path, master=None):
visited = []
if not isinstance(path, string_types):
for p in path:
if fs.isfile(p):
path = p
break
else:
raise errors.SettingsError(
"""settings file not found (looked for {} in {})""".format(
textual_list(path, join_word="and"), fs
)
)
settings_stack = []
while 1:
path = relpath(normpath(path))
if path in visited:
raise errors.SettingsError(
"""recursive extends detected, "{}" has already been extended""".format(
path
)
)
with fs.open(path, "rt") as settings_file:
s = iniparse.parse(
settings_file,
SettingsContainer(),
section_class=SettingsSectionContainer,
)
visited.append(path)
settings_stack.append(s)
if "extends" in s[""]:
# path = s['']['extends']
path = join(dirname(path), s[""]["extends"])
else:
break
settings_stack = settings_stack[::-1]
settings = settings_stack[0]
s = cls.__class__(settings_stack[0])
for s in settings_stack[1:]:
for section_name, section in s.items():
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
if master is not None:
cls.apply_master(master, settings)
return settings
@classmethod
def read_os(cls, path):
visited = []
settings_stack = []
while 1:
path = os.path.abspath(os.path.normpath(path))
if path in visited:
raise errors.SettingsError(
"""recursive extends detected, "{}" has already been extended""".format(
path
)
)
with io.open(path, "rt") as settings_file:
s = iniparse.parse(
settings_file,
SettingsContainer(),
section_class=SettingsSectionContainer,
)
visited.append(path)
settings_stack.append(s)
if "extends" in s[""]:
path = s[""]["extends"]
else:
break
settings_stack = settings_stack[::-1]
settings = settings_stack[0]
s = cls.__class__(settings_stack[0])
for s in settings_stack[1:]:
for section_name, section in s.items():
if section_name in settings:
settings[section_name].update(section)
else:
settings[section_name] = section
return settings
@classmethod
def read_from_file(self, settings_file):
"""Reads settings, but doesn't do any extends processing"""
settings = iniparse.parse(
settings_file, SettingsContainer(), section_class=SettingsSectionContainer
)
return settings
@classmethod
def from_dict(self, d):
return SettingsSectionContainer((k, SettingContainer(v)) for k, v in d.items())
@classmethod
def create(cls, **kwargs):
return cls.from_dict(kwargs)
def export(self, output_file, comments=None):
"""Write the settings to an open file"""
ini = iniparse.write(self, comments=comments)
output_file.write(ini)
def copy(self):
return SettingsContainer(self)
def __getitem__(self, key):
try:
return super(SettingsContainer, self).__getitem__(key)
except KeyError:
return EmptySettings()
def get(self, section_name, key, default=Ellipsis):
if section_name not in self:
if default is Ellipsis:
raise SettingsKeyError(
"required section [%s] not found in settings" % section_name
)
else:
return default
section = self[section_name]
if key not in section:
if default is Ellipsis:
raise SettingsKeyError(
"key '%s' not found in section [%s]" % (key, section_name)
)
else:
return default
return section[key]
def set(self, section_name, key, value):
if section_name not in self:
self[section_name] = SettingsSectionContainer()
self[section_name][key] = value
def get_bool(self, section_name, key, default=False):
value = self.get(section_name, key, "yes" if default else "no")
return value.strip().lower() in ("yes", "true")
def get_list(self, section_name, key, default=""):
value = self.get(section_name, key, default=default)
return [line.strip() for line in value.splitlines() if line.strip()]
def get_int(self, section_name, key, default=None):
value_text = self.get(section_name, key, None)
if value_text is None or not value_text.strip():
return None
try:
value = int(value_text)
except:
raise SettingsKeyError(
"key [{}]/{} should be empty or an integer value (not '{}')".format(
section_name, key, value_text
)
)
else:
return value
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
table += sorted(self.items())
console.table(table)
class SettingsSectionContainer(OrderedDict):
def get_int(self, key, default=None):
if key not in self:
return default
value = int(self[key])
return value
def get_bool(self, key, default=False):
value = self.get(key, "yes" if default else "no")
return value.strip().lower() in ("yes", "true")
def get_list(self, key, default=""):
value = self.get(key, default)
return [line.strip() for line in value.splitlines() if line.strip()]
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
table += sorted(self.items())
console.table(table)
def __setitem__(self, key, value):
value = SettingContainer(text_type(value))
super(SettingsSectionContainer, self).__setitem__(key, value)
@implements_bool
class EmptySettings(object):
def __getitem__(self, key):
if key == "list":
return []
if key == "bool":
return False
if key == "int":
return 0
return ""
def __repr__(self):
return "<emptysettings>"
def get_int(self, key, default=None):
return default
def get_bool(self, key, default=False):
return default
def get_list(self, key, default=""):
return default
def get(self, key, default=None):
return default
def __bool__(self):
return False
def __unicode__(self):
return ""
def __iter__(self):
return iter([])
def items(self):
return []
def __moyaconsole__(self, console):
from console import Cell
table = [(Cell("key", bold=True), Cell("value", bold=True))]
console.table(table)
@implements_to_string
class SettingContainer(text_type):
def __init__(self, setting_text):
if PY2:
super(SettingContainer, self).__init__(setting_text)
else:
super().__init__()
# self.raw = setting_text
self.setting_text = setting_text.strip()
self.lines = [line.strip() for line in setting_text.splitlines()] or []
self.first = self.lines[0] if self.lines else ""
self.bool = self.setting_text.lower() in ("yes", "true")
try:
self.int = int(self.setting_text)
except ValueError:
self.int = None
try:
self.float = float(self.setting_text)
except ValueError:
self.float = None
def __str__(self):
return self.setting_text
def __getitem__(self, index):
if isinstance(index, string_types):
if index == "list":
return self.lines
elif index == "bool":
return self.bool
elif index == "int":
return self.int
elif index == "float":
return self.float
return self.lines[index]
def __eq__(self, other):
return self.first == other
def __ne__(self, other):
return self.first != other
if not PY2:
def __hash__(self):
return super().__hash__()
if __name__ == "__main__":
settings = SettingsContainer()
print(settings["nothere"])
s = SettingContainer("foo\nbar")
print(s == "foo")
print(s["list"])
|
mit
| -4,349,579,899,935,977,000
| 29.404692
| 92
| 0.53559
| false
| 4.442159
| false
| false
| false
|
Macainian/BaseDjangoProject
|
website/management/commands/create_admin_account_migration.py
|
1
|
1751
|
import os
from django.core.management.base import BaseCommand
from website.settings import BASE_DIR
class Command(BaseCommand):
def handle(self, *args, **options):
migrations_folder = os.path.join(BASE_DIR, "website", "migrations")
admin_account_migration_text = self.get_admin_account_migration_text()
        # Create the 0001_initial.py migration file
with open(os.path.join(migrations_folder, "0001_initial.py"), "w+") as admin_account_migration_file:
admin_account_migration_file.write(admin_account_migration_text)
def get_admin_account_migration_text(self):
# The string below is specifically formatted this way to ensure that it looks correct on the actual file
# since we are using """
return \
"""from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from website.apps.staff_member_manager.models import StaffMember
def add_staff_members(apps, schema_editor):
if not StaffMember.objects.filter(user__username="admin").exists():
user = User.objects.create(username="admin")
staff_member = StaffMember.objects.create(user=user)
staff_member.user.is_staff = True
staff_member.user.is_active = True
staff_member.user.is_superuser = True
staff_member.user.set_password("1")
staff_member.generated_password = ""
staff_member.user.first_name = "System"
staff_member.user.last_name = "Superuser"
staff_member.user.save()
staff_member.save()
class Migration(migrations.Migration):
dependencies = [
("staff_member_manager", "0001_initial"),
]
operations = [
migrations.RunPython(add_staff_members),
]
"""
|
mit
| -4,347,252,034,574,000,600
| 32.692308
| 112
| 0.682467
| false
| 3.908482
| false
| false
| false
|
rpdillon/wikid
|
wikid/__init__.py
|
1
|
3539
|
#!/usr/bin/env python
# wikid, Copyright (c) 2010, R. P. Dillon <rpdillon@etherplex.org>
# This file is part of wikid.
#
# wikid is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
wikid - A Mercurial extension that provides a distributed wiki backed
by Mercurial.
The intended use is to provide a distributed Wiki capability using
existing, widely adopted technologies. wikid makes use of:
- mercurial for revision control
- web.py for templating and serving the web interface
- DocUtils' implementation of reStructuredText for markup
'''
import web
from wikid import WikiContent, ReadNode, EditNode, PageIndex, DeleteNode
from wikid import NodeHistory, StaticLibs, RecoverNode, PrintNode, Help, Upload, FileManagement
from wikid import getExtensionPath
def wikid(ui, repo, **opts):
"""
Invokes web.py to serve content using the WikiContentEngine.
"""
# Maps the VFS directory locations to the names of the classes
# that handle those requests.
urls = (
'/PageIndex', 'PageIndex',
'/upload', 'Upload',
'/Help', 'Help',
'/files/(.*)', 'FileManagement',
'/history/(.*)', 'NodeHistory',
'/delete/(.*)', 'DeleteNode',
'/recover/(.*)', 'RecoverNode',
'/edit/(.*)', 'EditNode',
'/lib/(.*)', 'StaticLibs',
'/print/(.*)', 'PrintNode',
'/(.*)', 'ReadNode'
)
from mercurial import hg
####
# Path modifications
import sys
# Because the Mercurial installer under Windows does not provide
# the necessary scaffolding to support web.py, we patch the system
# path with the locations of a Python installation
sys.path.append("C:\\Python26\\lib")
# wikid includes some necessary libraries (like DocUtils), so we
# add its path to the system path as well.
sys.path.append(getExtensionPath())
####
# Set the default revision, if none is specified
if opts['rev'] == '':
rev = repo.changelog.nodemap[repo.changelog.tip()]
else:
rev = opts['rev']
#Set up the content engine
WikiContent.setUi(ui)
WikiContent.setRepo(repo)
WikiContent.setRev(rev)
app = web.application(urls, globals())
    # Hack to avoid web.py parsing mercurial's command-line args
    sys.argv = ['wikid', opts['port']]
# Fire off the web.py (CherryPy) server
app.run()
# Specifies the command line parameters and brief descriptions. For
# basic usage, navigate to the repository containing the wiki data and
# simply type:
#
# hg wikid
#
# To set the port, use something like:
#
# hg wikid -p 9000
#
# To set the revision you want to serve:
#
# hg wikid -r 2
cmdtable = {
"wikid": (wikid,
[('r', 'rev', '', 'The revision of the repository to serve.'),
('p', 'port', '8080', 'The port on which to serve.')],
"hg wikid [options]")
}
|
gpl-3.0
| -6,462,069,072,412,644,000
| 31.768519
| 99
| 0.658378
| false
| 3.780983
| false
| false
| false
|
jjdmol/LOFAR
|
CEP/Pipeline/framework/lofarpipe/support/xmllogging.py
|
1
|
5594
|
"""
xml based logging constructs and helpers functions
"""
import xml.dom.minidom as _xml
def add_child(head, name):
"""
    Create a node with the given name and append it to the child list of the
    supplied node (duplicate tag names are allowed, as permitted by xml).
    Return the created node.
"""
local_document = _xml.Document()
created_node = local_document.createElement(name)
head.appendChild(created_node)
return created_node
def get_child(node, name):
"""
Return the first direct descendant (child) of the supplied node with
the tagname name. The default xml getchild also looks in child nodes.
Return None if no match is found
"""
for child in node.childNodes:
if child.nodeName == name:
return child
return None
def get_active_stack(calling_object, stack_name="active_stack"):
"""
    Return the active stack on the current class.
    Return None if it is not present.
"""
if hasattr(calling_object, stack_name):
stack_node = calling_object.__getattribute__(stack_name)
if stack_node.getAttribute("type") == "active_stack":
return stack_node
return None
def add_child_to_active_stack_head(calling_object, child,
stack_name="active_stack"):
"""
Add the supplied child to the current active node in the active stack.
    Return the added child on success, None if no active stack was found.
    Selection between active stacks can be done with the stack_name argument.
"""
active_stack = get_active_stack(calling_object, stack_name="active_stack")
    if active_stack is not None:
active_stack_node = get_child(active_stack, stack_name)
last_child = active_stack_node.lastChild
if last_child != None:
last_child.appendChild(child)
return child
return None
def enter_active_stack(calling_object, child,
stack_name="active_stack", comment=None):
"""
This function adds stack-like behaviour to an object:
    On a 'fresh' object an xml node is added as an attribute. This node
    acts as a stack and allows nested adding of nodes to track nested
    functionality.
    If the function is called on an object with an active_stack already
    present, a nested node is added.
    The current nesting is tracked in the active stack. Past calls are
    saved for logging purposes.
The comment argument allows adding extra info to a node
"""
active_stack_node = None
stack_node = None
    # Check if the calling object has an active stack node with
# name == stack_name
if not hasattr(calling_object, stack_name):
# Create the xml node if it not exists
_throw_away_document = _xml.Document()
stack_node = \
_throw_away_document.createElement(stack_name)
# The xml name of the object is the calling object
stack_node.setAttribute("Name", calling_object.__class__.__name__)
stack_node.setAttribute("type", "active_stack")
# assign the node to the calling class as an attribute
calling_object.__setattr__(stack_name, stack_node)
# add the 'call stack'
        active_stack_node = add_child(stack_node, stack_name)  # generic
else:
stack_node = calling_object.__getattribute__(stack_name)
# Find the active stack
active_stack_node = get_child(stack_node, stack_name)
if active_stack_node == None:
active_stack_node = add_child(stack_node, stack_name)
if comment != None:
stack_node.setAttribute("comment", comment)
active_stack_node.setAttribute("info",
"Contains functions not left with a return")
# if child is a string add a xml node with this name
stacked_child = None
if isinstance(child, basestring):
stacked_child = add_child(active_stack_node, child)
# else try adding it as a node
elif isinstance(child, _xml.Node):
active_stack_node.appendChild(child)
stacked_child = child
return stacked_child
def exit_active_stack(calling_object, stack_name="active_stack"):
"""
Mirror function to enter_active_stack.
Performs bookkeeping after leaving a stack:
    Add the node that was left as a child of the current active node.
    If this is the last active node, move it to the 'inactive node' list.
"""
# get the named active stack node
if not hasattr(calling_object, stack_name):
raise ValueError(
"Tried leaving an active-stack which"
" has not been entered: stack_name={0} does not exist".format(
stack_name))
active_stack_node = calling_object.__getattribute__(
stack_name)
# get the active stack
active_stack = None
for child_node in active_stack_node.childNodes:
if child_node.nodeName == stack_name:
active_stack = child_node
break
# Get the current last item in the stack
last_child = active_stack.lastChild
# remove it
active_stack.removeChild(last_child)
# Now 'log' the now 'finished' step
if active_stack.lastChild == None:
# add to the main time_logger node
active_stack_node.appendChild(last_child)
else:
# add to the calling node info
active_stack.lastChild.appendChild(last_child)
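# A minimal usage sketch of the active-stack helpers above; the class and node
# names are illustrative only.
if __name__ == '__main__':
    class _Recipe(object):
        pass
    recipe = _Recipe()
    enter_active_stack(recipe, "run")     # open the 'run' node
    enter_active_stack(recipe, "copier")  # nested step inside 'run'
    exit_active_stack(recipe)             # leave 'copier'; it gets nested under 'run'
    exit_active_stack(recipe)             # leave 'run'; it is logged on the top node
    print(recipe.active_stack.toprettyxml())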
|
gpl-3.0
| -4,474,977,738,795,691,000
| 35.046358
| 78
| 0.63157
| false
| 4.346542
| false
| false
| false
|
markstoehr/structured_gaussian_mixtures
|
lstm_gaussian_mixtures/test_negative_loglikelihood.py
|
1
|
2280
|
# %autoindent  (IPython shell magic, not valid outside an interactive session)
import numpy
import theano
from theano import tensor
def numpy_floatX(data):
return numpy.asarray(data, dtype=theano.config.floatX)
num_timesteps = 10
num_sequences = 3
num_dim = 2
num_components = 3
x_n = (numpy.arange(num_timesteps * num_sequences * num_dim,
dtype=theano.config.floatX)
.reshape(num_sequences, num_timesteps, num_dim)
.swapaxes(0, 1))
y_n = (numpy.arange(num_timesteps * num_sequences,
dtype=theano.config.floatX)
.reshape(num_sequences, num_timesteps)
.T + 2)
x = tensor.tensor3('x', dtype=theano.config.floatX)
y = tensor.matrix('y', dtype=theano.config.floatX)
W_n_sigma = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim,))
W_sigma = theano.shared(W_n_sigma, borrow=True, name='W_sigma')
W_n_mu = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim,))
W_mu = theano.shared(W_n_mu, borrow=True, name='W_mu')
W_n_mix = numpy.random.uniform(
low=-1,
high=1,
size=(num_dim, num_components,))
W_mix = theano.shared(W_n_mix, borrow=True, name='W_mix')
# check whether scan does what I think it does
def step(x_, y_, ll_):
v = tensor.mean((x_[:, 1:] - x_[:, :-1])**2, axis=-1)
mu = tensor.dot(x_, W_mu)
invsigma = tensor.maximum(tensor.nnet.sigmoid(
tensor.dot(x_, W_sigma)), 1e-8) / v
return (mu - y_)**2 * invsigma
lls, updates = theano.scan(step, sequences=[x, y],
outputs_info=[tensor.alloc(numpy_floatX(0.),
num_sequences)],
name='lls',
n_steps=num_timesteps)
f_lls = theano.function([x, y], lls)
f_updates = theano.function([], updates)
def sigmoid(z):
less_than_mask = z < -30
greater_than_mask = z > 30
    in_range_mask = ~less_than_mask & ~greater_than_mask
out = numpy.empty(z.shape, dtype=float)
out[in_range_mask] = 1.0/(1+numpy.exp(-z[in_range_mask]))
out[less_than_mask] = 0.0
out[greater_than_mask] = 1.0
return out
mu_n = numpy.dot(x_n, W_n_mu)
invsigma_n = numpy.maximum(sigmoid(numpy.dot(x_n, W_n_sigma)), 1e-8)
lls_n = (mu_n - y_n)**2 * invsigma_n
|
apache-2.0
| -6,745,985,951,908,591,000
| 29.4
| 71
| 0.571053
| false
| 2.976501
| false
| false
| false
|
tensorflow/graphics
|
tensorflow_graphics/nn/layer/pointnet.py
|
1
|
8675
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the PointNet networks.
@inproceedings{qi2017pointnet,
title={Pointnet: Deep learning on point sets
  for 3d classification and segmentation},
author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
booktitle={Proceedings of the IEEE conference on computer vision and pattern
recognition},
pages={652--660},
year={2017}}
NOTE: scheduling of batchnorm momentum currently not available in keras. However
experimentally, using the batch norm from Keras resulted in better test accuracy
(+1.5%) than the author's [custom batch norm
version](https://github.com/charlesq34/pointnet/blob/master/utils/tf_util.py)
even when coupled with batchnorm momentum decay. Further, note the author's
version is actually performing a "global normalization", as mentioned in the
[tf.nn.moments documentation]
(https://www.tensorflow.org/api_docs/python/tf/nn/moments).
This shorthand notation is used throughout this module:
`B`: Number of elements in a batch.
`N`: The number of points in the point set.
`D`: Number of dimensions (e.g. 2 for 2D, 3 for 3D).
`C`: The number of feature channels.
"""
import tensorflow as tf
from tensorflow_graphics.util import export_api
class PointNetConv2Layer(tf.keras.layers.Layer):
"""The 2D convolution layer used by the feature encoder in PointNet."""
def __init__(self, channels, momentum):
"""Constructs a Conv2 layer.
Note:
      Unlike the standard Keras Conv2D layer, the order of ops is:
1. fully connected layer
2. batch normalization layer
3. ReLU activation unit
Args:
channels: the number of generated feature.
momentum: the momentum of the batch normalization layer.
"""
super(PointNetConv2Layer, self).__init__()
self.channels = channels
self.momentum = momentum
def build(self, input_shape):
"""Builds the layer with a specified input_shape."""
self.conv = tf.keras.layers.Conv2D(
self.channels, (1, 1), input_shape=input_shape)
self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Executes the convolution.
Args:
inputs: a dense tensor of size `[B, N, 1, D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, N, 1, C]`.
"""
return tf.nn.relu(self.bn(self.conv(inputs), training))
class PointNetDenseLayer(tf.keras.layers.Layer):
"""The fully connected layer used by the classification head in pointnet.
Note:
    Unlike the standard Keras Dense layer, the order of ops is:
1. fully connected layer
2. batch normalization layer
3. ReLU activation unit
"""
def __init__(self, channels, momentum):
super(PointNetDenseLayer, self).__init__()
self.momentum = momentum
self.channels = channels
def build(self, input_shape):
"""Builds the layer with a specified input_shape."""
self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)
self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Executes the convolution.
Args:
inputs: a dense tensor of size `[B, D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, C]`.
"""
return tf.nn.relu(self.bn(self.dense(inputs), training))
class VanillaEncoder(tf.keras.layers.Layer):
"""The Vanilla PointNet feature encoder.
Consists of five conv2 layers with (64,64,64,128,1024) output channels.
Note:
PointNetConv2Layer are used instead of tf.keras.layers.Conv2D.
https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py
"""
def __init__(self, momentum=.5):
"""Constructs a VanillaEncoder keras layer.
Args:
momentum: the momentum used for the batch normalization layer.
"""
super(VanillaEncoder, self).__init__()
self.conv1 = PointNetConv2Layer(64, momentum)
self.conv2 = PointNetConv2Layer(64, momentum)
self.conv3 = PointNetConv2Layer(64, momentum)
self.conv4 = PointNetConv2Layer(128, momentum)
self.conv5 = PointNetConv2Layer(1024, momentum)
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Computes the PointNet features.
Args:
inputs: a dense tensor of size `[B,N,D]`.
training: flag to control batch normalization update statistics.
Returns:
Tensor with shape `[B, N, C=1024]`
"""
x = tf.expand_dims(inputs, axis=2) # [B,N,1,D]
x = self.conv1(x, training) # [B,N,1,64]
x = self.conv2(x, training) # [B,N,1,64]
x = self.conv3(x, training) # [B,N,1,64]
x = self.conv4(x, training) # [B,N,1,128]
x = self.conv5(x, training) # [B,N,1,1024]
x = tf.math.reduce_max(input_tensor=x, axis=1) # [B,1,1024]
return tf.squeeze(x) # [B,1024]
class ClassificationHead(tf.keras.layers.Layer):
"""The PointNet classification head.
The head consists of 2x PointNetDenseLayer layers (512 and 256 channels)
  followed by a dropout layer (drop rate=30%) and a dense linear layer producing the
logits of the num_classes classes.
"""
def __init__(self, num_classes=40, momentum=0.5, dropout_rate=0.3):
"""Constructor.
Args:
num_classes: the number of classes to classify.
momentum: the momentum used for the batch normalization layer.
dropout_rate: the dropout rate for fully connected layer
"""
super(ClassificationHead, self).__init__()
self.dense1 = PointNetDenseLayer(512, momentum)
self.dense2 = PointNetDenseLayer(256, momentum)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
self.dense3 = tf.keras.layers.Dense(num_classes, activation="linear")
def call(self, inputs, training=None): # pylint: disable=arguments-differ
"""Computes the classifiation logits given features (note: without softmax).
Args:
inputs: tensor of points with shape `[B,D]`.
training: flag for batch normalization and dropout training.
Returns:
Tensor with shape `[B,num_classes]`
"""
x = self.dense1(inputs, training) # [B,512]
x = self.dense2(x, training) # [B,256]
x = self.dropout(x, training) # [B,256]
    return self.dense3(x)  # [B,num_classes]
class PointNetVanillaClassifier(tf.keras.layers.Layer):
"""The PointNet 'Vanilla' classifier (i.e. without spatial transformer)."""
def __init__(self, num_classes=40, momentum=.5, dropout_rate=.3):
"""Constructor.
Args:
num_classes: the number of classes to classify.
momentum: the momentum used for the batch normalization layer.
dropout_rate: the dropout rate for the classification head.
"""
super(PointNetVanillaClassifier, self).__init__()
self.encoder = VanillaEncoder(momentum)
self.classifier = ClassificationHead(
num_classes=num_classes, momentum=momentum, dropout_rate=dropout_rate)
def call(self, points, training=None): # pylint: disable=arguments-differ
"""Computes the classifiation logits of a point set.
Args:
      points: a tensor of points with shape `[B, N, D]`
training: for batch normalization and dropout training.
Returns:
Tensor with shape `[B,num_classes]`
"""
features = self.encoder(points, training) # (B,1024)
logits = self.classifier(features, training) # (B,num_classes)
return logits
@staticmethod
def loss(labels, logits):
"""The classification model training loss.
Note:
see tf.nn.sparse_softmax_cross_entropy_with_logits
Args:
labels: a tensor with shape `[B,]`
logits: a tensor with shape `[B,num_classes]`
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits
residual = cross_entropy(labels, logits)
return tf.reduce_mean(input_tensor=residual)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
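# A minimal usage sketch of the classifier defined above; batch size, point
# count and number of classes are illustrative assumptions.
if __name__ == "__main__":
  points = tf.random.uniform((32, 1024, 3))  # `[B=32, N=1024, D=3]`
  labels = tf.random.uniform((32,), maxval=40, dtype=tf.int32)  # `[B]`
  model = PointNetVanillaClassifier(num_classes=40)
  logits = model(points, training=True)  # `[B, num_classes]`
  print(PointNetVanillaClassifier.loss(labels, logits))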
|
apache-2.0
| -210,864,469,539,070,560
| 34.264228
| 81
| 0.69487
| false
| 3.680526
| false
| false
| false
|
cfelton/minnesota
|
mn/cores/fifo/_fifo_sync.py
|
1
|
3380
|
# Copyright (c) 2014 Christopher L. Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import log, fmod, ceil
from myhdl import *
from _fifo_intf import check_fifo_intf
from _fifo_intf import _fifobus
from _fifo_mem import m_fifo_mem_generic
def m_fifo_sync(clock, reset, fbus):
""" Simple synchronous FIFO
PORTS
=====
PARAMETERS
==========
"""
# @todo: this is intended to be used for small fast fifo's but it
# can be used for large synchronous fifo as well
N = fbus.size
if fmod(log(N, 2), 1) != 0:
Asz = int(ceil(log(N,2)))
N = 2**Asz
print("@W: m_fifo_sync only supports power of 2 size")
print(" forcing size (depth) to %d instread of " % (N, fbus.size))
wptr = Signal(modbv(0, min=0, max=N))
rptr = Signal(modbv(0, min=0, max=N))
_vld = Signal(False)
# generic memory model
g_fifomem = m_fifo_mem_generic(clock, fbus.wr, fbus.wdata, wptr,
clock, fbus.rdata, rptr,
mem_size=fbus.size)
# @todo: almost full and almost empty flags
read = fbus.rd
write = fbus.wr
@always_seq(clock.posedge, reset=reset)
def rtl_fifo():
if fbus.clear:
wptr.next = 0
rptr.next = 0
fbus.full.next = False
fbus.empty.next = True
elif read and not write:
fbus.full.next = False
if not fbus.empty:
rptr.next = rptr + 1
if rptr == (wptr-1):
fbus.empty.next = True
elif write and not read:
fbus.empty.next = False
if not fbus.full:
wptr.next = wptr + 1
if wptr == (rptr-1):
fbus.full.next = True
elif write and read:
wptr.next = wptr + 1
rptr.next = rptr + 1
_vld.next = read
@always_comb
def rtl_assign():
fbus.rvld.next = _vld & fbus.rd
nvacant = Signal(intbv(N, min=-0, max=N+1)) # # empty slots
ntenant = Signal(intbv(0, min=-0, max=N+1)) # # filled slots
@always_seq(clock.posedge, reset=reset)
def dbg_occupancy():
if fbus.clear:
nvacant.next = N
ntenant.next = 0
else:
v = nvacant
f = ntenant
if fbus.rvld:
v = v + 1
f = f - 1
if fbus.wr:
v = v -1
f = f + 1
nvacant.next = v
ntenant.next = f
fbus.count = ntenant
return (g_fifomem, rtl_fifo, rtl_assign, dbg_occupancy,)
# attached a generic fifo bus object to the module
m_fifo_sync.fbus_intf = _fifobus
|
gpl-3.0
| 8,068,396,567,377,177,000
| 28.4
| 78
| 0.557692
| false
| 3.51717
| false
| false
| false
|
njantrania/osf.io
|
scripts/refresh_box_tokens.py
|
1
|
1487
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import logging
import datetime
from modularodm import Q
from dateutil.relativedelta import relativedelta
from scripts import utils as scripts_utils
from website.app import init_app
from website.oauth.models import ExternalAccount
from website.addons.base.exceptions import AddonError
from website.addons.box.utils import refresh_oauth_key
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_targets(delta):
return ExternalAccount.find(
Q('expires_at', 'lt', datetime.datetime.utcnow() + delta) &
Q('provider', 'eq', 'box')
)
def main(delta, dry_run):
for record in get_targets(delta):
logger.info(
'Refreshing tokens on record {0}; expires at {1}'.format(
record._id,
record.expires_at.strftime('%c')
)
)
if not dry_run:
try:
refresh_oauth_key(record, force=True)
except AddonError as ex:
logger.error(ex.message)
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
dry_run = 'dry' in sys.argv
try:
days = int(sys.argv[2])
except (IndexError, ValueError, TypeError):
days = 7 # refresh tokens that expire this week
delta = relativedelta(days=days)
# Log to file
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
main(delta, dry_run=dry_run)
|
apache-2.0
| 3,008,566,150,502,636,000
| 26.537037
| 69
| 0.63618
| false
| 3.803069
| false
| false
| false
|
denversc/cligen
|
cligen/target_python.py
|
1
|
1177
|
# Copyright 2015 Denver Coneybeare <denver@sleepydragon.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python target language support for cligen.
"""
from cligen.targets import Jinja2TargetLanguageBase
class PythonTargetLanguage(Jinja2TargetLanguageBase):
def __init__(self):
output_file = self.OutputFileInfo(
name="source file",
default_value="cligen.py",
template_name="python.py",
)
super().__init__(
key="python",
name="python",
output_files=(output_file,),
)
|
gpl-3.0
| -7,476,964,423,816,049,000
| 31.694444
| 71
| 0.68819
| false
| 4.144366
| false
| false
| false
|
chimkentec/KodiMODo_rep
|
plugin.video.torrenter/resources/scrapers/net.py
|
1
|
9741
|
# -*- coding: utf-8 -*-
import os
import time
import re
import urllib
import urllib2
import cookielib
import base64
import mimetools
import itertools
import xbmc
import xbmcgui
import xbmcvfs
RE = {
'content-disposition': re.compile('attachment;\sfilename="*([^"\s]+)"|\s')
}
# ################################
#
# HTTP
#
# ################################
class HTTP:
def __init__(self):
self._dirname = xbmc.translatePath('special://temp')
for subdir in ('xbmcup', 'plugin.video.torrenter'):
self._dirname = os.path.join(self._dirname, subdir)
if not xbmcvfs.exists(self._dirname):
xbmcvfs.mkdir(self._dirname)
def fetch(self, request, **kwargs):
self.con, self.fd, self.progress, self.cookies, self.request = None, None, None, None, request
if not isinstance(self.request, HTTPRequest):
self.request = HTTPRequest(url=self.request, **kwargs)
self.response = HTTPResponse(self.request)
xbmc.log('XBMCup: HTTP: request: ' + str(self.request), xbmc.LOGDEBUG)
try:
self._opener()
self._fetch()
except Exception, e:
xbmc.log('XBMCup: HTTP: ' + str(e), xbmc.LOGERROR)
if isinstance(e, urllib2.HTTPError):
self.response.code = e.code
self.response.error = e
else:
self.response.code = 200
if self.fd:
self.fd.close()
self.fd = None
if self.con:
self.con.close()
self.con = None
if self.progress:
self.progress.close()
self.progress = None
self.response.time = time.time() - self.response.time
xbmc.log('XBMCup: HTTP: response: ' + str(self.response), xbmc.LOGDEBUG)
return self.response
def _opener(self):
build = [urllib2.HTTPHandler()]
if self.request.redirect:
build.append(urllib2.HTTPRedirectHandler())
if self.request.proxy_host and self.request.proxy_port:
build.append(urllib2.ProxyHandler(
{self.request.proxy_protocol: self.request.proxy_host + ':' + str(self.request.proxy_port)}))
if self.request.proxy_username:
proxy_auth_handler = urllib2.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'uri', self.request.proxy_username,
self.request.proxy_password)
build.append(proxy_auth_handler)
if self.request.cookies:
self.request.cookies = os.path.join(self._dirname, self.request.cookies)
self.cookies = cookielib.MozillaCookieJar()
if os.path.isfile(self.request.cookies):
self.cookies.load(self.request.cookies)
build.append(urllib2.HTTPCookieProcessor(self.cookies))
urllib2.install_opener(urllib2.build_opener(*build))
def _fetch(self):
params = {} if self.request.params is None else self.request.params
if self.request.upload:
boundary, upload = self._upload(self.request.upload, params)
req = urllib2.Request(self.request.url)
req.add_data(upload)
else:
if self.request.method == 'POST':
if isinstance(params, dict) or isinstance(params, list):
params = urllib.urlencode(params)
req = urllib2.Request(self.request.url, params)
else:
req = urllib2.Request(self.request.url)
for key, value in self.request.headers.iteritems():
req.add_header(key, value)
if self.request.upload:
req.add_header('Content-type', 'multipart/form-data; boundary=%s' % boundary)
req.add_header('Content-length', len(upload))
if self.request.auth_username and self.request.auth_password:
req.add_header('Authorization', 'Basic %s' % base64.encodestring(
':'.join([self.request.auth_username, self.request.auth_password])).strip())
self.con = urllib2.urlopen(req, timeout=self.request.timeout)
# self.con = urllib2.urlopen(req)
self.response.headers = self._headers(self.con.info())
if self.request.download:
self._download()
else:
self.response.body = self.con.read()
if self.request.cookies:
self.cookies.save(self.request.cookies)
def _download(self):
fd = open(self.request.download, 'wb')
if self.request.progress:
self.progress = xbmcgui.DialogProgress()
self.progress.create(u'Download')
bs = 1024 * 8
size = -1
read = 0
name = None
if self.request.progress:
if 'content-length' in self.response.headers:
size = int(self.response.headers['content-length'])
if 'content-disposition' in self.response.headers:
r = RE['content-disposition'].search(self.response.headers['content-disposition'])
if r:
name = urllib.unquote(r.group(1))
while 1:
buf = self.con.read(bs)
if buf == '':
break
read += len(buf)
fd.write(buf)
if self.request.progress:
self.progress.update(*self._progress(read, size, name))
self.response.filename = self.request.download
def _upload(self, upload, params):
res = []
boundary = mimetools.choose_boundary()
part_boundary = '--' + boundary
if params:
for name, value in params.iteritems():
res.append([part_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', value])
if isinstance(upload, dict):
upload = [upload]
for obj in upload:
name = obj.get('name')
filename = obj.get('filename', 'default')
content_type = obj.get('content-type')
try:
body = obj['body'].read()
except AttributeError:
body = obj['body']
if content_type:
res.append([part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.quote(filename)),
'Content-Type: %s' % content_type, '', body])
else:
res.append([part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.quote(filename)), '',
body])
result = list(itertools.chain(*res))
result.append('--' + boundary + '--')
result.append('')
return boundary, '\r\n'.join(result)
def _headers(self, raw):
headers = {}
for line in raw.headers:
pair = line.split(':', 1)
if len(pair) == 2:
tag = pair[0].lower().strip()
value = pair[1].strip()
if tag and value:
headers[tag] = value
return headers
def _progress(self, read, size, name):
res = []
if size < 0:
res.append(1)
else:
res.append(int(float(read) / (float(size) / 100.0)))
if name:
res.append(u'File: ' + name)
if size != -1:
res.append(u'Size: ' + self._human(size))
res.append(u'Load: ' + self._human(read))
return res
def _human(self, size):
human = None
for h, f in (('KB', 1024), ('MB', 1024 * 1024), ('GB', 1024 * 1024 * 1024), ('TB', 1024 * 1024 * 1024 * 1024)):
if size / f > 0:
human = h
factor = f
else:
break
if human is None:
return (u'%10.1f %s' % (size, u'byte')).replace(u'.0', u'')
else:
return u'%10.2f %s' % (float(size) / float(factor), human)
class HTTPRequest:
def __init__(self, url, method='GET', headers=None, cookies=None, params=None, upload=None, download=None,
progress=False, auth_username=None, auth_password=None, proxy_protocol='http', proxy_host=None,
proxy_port=None, proxy_username=None, proxy_password='', timeout=20.0, redirect=True, gzip=False):
if headers is None:
headers = {}
self.url = url
self.method = method
self.headers = headers
self.cookies = cookies
self.params = params
self.upload = upload
self.download = download
self.progress = progress
self.auth_username = auth_username
self.auth_password = auth_password
self.proxy_protocol = proxy_protocol
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.timeout = timeout
self.redirect = redirect
self.gzip = gzip
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ','.join('%s=%r' % i for i in self.__dict__.iteritems()))
class HTTPResponse:
def __init__(self, request):
self.request = request
self.code = None
self.headers = {}
self.error = None
self.body = None
self.filename = None
self.time = time.time()
def __repr__(self):
args = ','.join('%s=%r' % i for i in self.__dict__.iteritems() if i[0] != 'body')
if self.body:
args += ',body=<data>'
else:
args += ',body=None'
return '%s(%s)' % (self.__class__.__name__, args)
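# A minimal usage sketch of the HTTP helper above; the URL and timeout are
# illustrative, and running it requires the Kodi runtime (xbmc modules).
if __name__ == '__main__':
    response = HTTP().fetch('http://example.com/', method='GET', timeout=10.0)
    if response.error is None:
        print response.code, len(response.body)
    else:
        print response.code, response.error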
|
gpl-3.0
| 6,977,368,259,031,119,000
| 31.79798
| 119
| 0.539267
| false
| 4.072324
| false
| false
| false
|
inveniosoftware-contrib/json-merger
|
json_merger/conflict.py
|
1
|
4958
|
# -*- coding: utf-8 -*-
#
# This file is part of Inspirehep.
# Copyright (C) 2016 CERN.
#
# Inspirehep is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Inspirehep is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inspirehep; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import json
from pyrsistent import freeze, thaw
from .utils import force_list
class ConflictType(object):
"""Types of Conflict.
Attributes:
REORDER: The list specified by the path might need to be reordered.
MANUAL_MERGE: The triple specified as the conflict body needs to be
manually merged and added to the conflict path.
ADD_BACK_TO_HEAD: The object specified as the conflict body might
need to be added back to the list specified in the conflict's path.
SET_FIELD: The object specified as the conflict body needs to be
added at the path specified in the conflict object.
REMOVE_FIELD: The value or object present at the path specified in
the path conflict needs to be removed.
INSERT: The object specified as the conflict body needs to be
inserted at the path specified in the conflict object.
"""
pass
_CONFLICTS = (
'REORDER',
'MANUAL_MERGE',
'ADD_BACK_TO_HEAD',
'SET_FIELD',
'REMOVE_FIELD',
'INSERT',
)
for conflict_type in _CONFLICTS:
setattr(ConflictType, conflict_type, conflict_type)
class Conflict(tuple):
"""Immutable and Hashable representation of a conflict.
Attributes:
conflict_type: A :class:`json_merger.conflict.ConflictType` member.
path: A tuple containing the path to the conflictual field.
body: Optional value representing the body of the conflict.
Note:
        Even though the conflict body can be an arbitrary object, it is saved
internally as an immutable object so that a Conflict instance can be
safely used in sets or as a dict key.
"""
# Based on http://stackoverflow.com/a/4828108
# Compatible with Python<=2.6
def __new__(cls, conflict_type, path, body):
if conflict_type not in _CONFLICTS:
raise ValueError('Bad Conflict Type %s' % conflict_type)
body = freeze(body)
return tuple.__new__(cls, (conflict_type, path, body))
conflict_type = property(lambda self: self[0])
path = property(lambda self: self[1])
body = property(lambda self: thaw(self[2]))
def with_prefix(self, root_path):
"""Returns a new conflict with a prepended prefix as a path."""
return Conflict(self.conflict_type, root_path + self.path, self.body)
def to_json(self):
"""Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
elif self.conflict_type == "INSERT":
op = "add"
else:
raise ValueError(
                'Conflict Type %s cannot be mapped to a json-patch operation'
                % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts)
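# A minimal usage sketch of the Conflict class above; the path and body values
# are illustrative only.
if __name__ == '__main__':
    conflict = Conflict(ConflictType.SET_FIELD, ('title',), 'A new title')
    print(conflict.conflict_type, conflict.path, conflict.body)
    print(conflict.with_prefix(('metadata',)).path)
    print(conflict.to_json())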
|
gpl-2.0
| -1,570,750,061,068,503,300
| 33.430556
| 79
| 0.635942
| false
| 4.097521
| false
| false
| false
|
theworldbright/mainsite
|
aspc/courses/models.py
|
1
|
9654
|
from django.db import models
from django.conf import settings
from datetime import date, datetime, timedelta
import json
from django.core.serializers.json import DjangoJSONEncoder
CAMPUSES = (
(1, u'PO'), (2, u'SC'), (3, u'CMC'), (4, u'HM'), (5, u'PZ'), (6, u'CGU'), (7, u'CU'), (8, u'KS'), (-1, u'?'))
CAMPUSES_FULL_NAMES = {1: 'Pomona', 2: 'Scripps', 3: 'Claremont-McKenna', 4: 'Harvey Mudd', 5: 'Pitzer'}
CAMPUSES_LOOKUP = dict([(a[1], a[0]) for a in CAMPUSES])
# Some campuses are represented more than one way so we make aliases
CAMPUSES_LOOKUP['CM'] = CAMPUSES_LOOKUP['CMC']
CAMPUSES_LOOKUP['CUC'] = CAMPUSES_LOOKUP['CU']
CAMPUSES_LOOKUP['CG'] = CAMPUSES_LOOKUP['CGU']
SESSIONS = ((u'SP', u'Spring'), (u'FA', u'Fall'))
SUBSESSIONS = ((u'P1', u'1'), (u'P2', u'2'))
# TODO: Make this robust for different semesters
# (see the academic calendar at http://catalog.pomona.edu/content.php?catoid=14&navoid=2582)
START_DATE = date(2015, 9, 1)
END_DATE = date(2015, 12, 18)
class Term(models.Model):
key = models.CharField(max_length=20, unique=True)
year = models.PositiveSmallIntegerField()
session = models.CharField(max_length=2, choices=SESSIONS)
def __unicode__(self):
return u'%s %s' % (self.session, self.year)
class Meta:
ordering = ['-year', 'session']
class Instructor(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Department(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
name = models.CharField(max_length=100)
def course_count(self):
return len(self.primary_course_set.all())
def non_breaking_name(self):
return self.name.replace(' ', ' ')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('department_detail', (), {'slug': self.code, })
class RequirementArea(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
name = models.CharField(max_length=100)
campus = models.SmallIntegerField(choices=CAMPUSES)
def course_count(self):
return len(self.course_set.all())
def non_breaking_name(self):
return self.name.replace(' ', ' ')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('requirement_area_detail', (), {'slug': self.code, })
class Course(models.Model):
code = models.CharField(max_length=20, unique=True, db_index=True)
code_slug = models.CharField(max_length=20, unique=True, db_index=True)
number = models.IntegerField(default=0)
name = models.CharField(max_length=256)
primary_department = models.ForeignKey(Department, related_name='primary_course_set', null=True)
departments = models.ManyToManyField(Department, related_name='course_set')
requirement_areas = models.ManyToManyField(RequirementArea, related_name='course_set')
def __unicode__(self):
return u'[%s] %s' % (self.code, self.name)
class Meta:
ordering = ('code',)
class Section(models.Model):
term = models.ForeignKey(Term, related_name='sections')
course = models.ForeignKey(Course, related_name='sections')
code = models.CharField(max_length=20)
code_slug = models.CharField(max_length=20)
instructors = models.ManyToManyField(Instructor, related_name='sections')
grading_style = models.CharField(max_length=100, blank=True, null=True)
description = models.TextField(blank=True, null=True)
note = models.TextField(blank=True, null=True)
credit = models.FloatField()
requisites = models.BooleanField(default=False)
fee = models.BooleanField(default=False)
perms = models.IntegerField(null=True)
spots = models.IntegerField(null=True)
filled = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return self.code
# return u'[%s] %s (%s)' % (
# self.code, self.course.name, ', '.join(self.instructors.all().values_list('name', flatten=True)))
def get_campuses(self):
campuses = []
for mtg in self.meeting_set.all():
campuses.append(mtg.get_campus())
return campuses
def get_campus(self):
campii = self.get_campuses()
if len(campii) > 0:
return self.get_campuses()[0]
else:
return 'UU'
def json(self):
event_list = []
for mtg in self.meeting_set.all():
for begin, end in mtg.to_datetime_ranges():
event_list.append({
'id': '%s-%s-%s' % (self.code, mtg.id, begin.strftime('%w')),
'start': begin,
'end': end,
'title': self.code,
})
return {'events': event_list, 'info': {'course_code': self.code, 'course_code_slug': self.code_slug,
'detail_url': self.get_absolute_url(),
'campus_code': self.get_campus(), }}
@models.permalink
def get_absolute_url(self):
if not self.course.primary_department: print self.course
return ('course_detail', (), {'course_code': self.code_slug, 'dept': self.course.primary_department.code, })
class Meta:
ordering = ('code',)
class Meeting(models.Model):
section = models.ForeignKey(Section)
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
begin = models.TimeField()
end = models.TimeField()
campus = models.SmallIntegerField(choices=CAMPUSES)
location = models.CharField(max_length=100)
def gen_days(self):
s = []
if self.monday: s.append('M')
if self.tuesday: s.append('T')
if self.wednesday: s.append('W')
if self.thursday: s.append('R')
if self.friday: s.append('F')
return s
def to_datetime_ranges(self, base_date=None):
ranges = []
combine_dates = []
# Historical note: the frontend calendar supports navigating week
# by week, but we've turned it into a stripped down week calendar.
#
# Under the hood, it still wants a timestamp for events, though it
# doesn't matter what as long as the day of the week works correctly.
frontend_calendar_start = date(2012, 9, 3)
# Note: the version of JQuery-WeekCalendar we have gets off by two on
# computing day-of-week starting in 2013. Rather than fix this, since
# we don't use the rest of its features, we froze it in the past.
if not base_date:
base_date = frontend_calendar_start
if self.monday:
combine_dates.append(base_date + timedelta(
days=(7 + 0 - base_date.weekday()) % 7 # get correct weekday
# offset depending on
# start date weekday
))
if self.tuesday:
combine_dates.append(base_date + timedelta(
days=(7 + 1 - base_date.weekday()) % 7
))
if self.wednesday:
combine_dates.append(base_date + timedelta(
days=(7 + 2 - base_date.weekday()) % 7
))
if self.thursday:
combine_dates.append(base_date + timedelta(
days=(7 + 3 - base_date.weekday()) % 7
))
if self.friday:
            combine_dates.append(base_date + timedelta(
days=(7 + 4 - base_date.weekday()) % 7
))
for basedate in combine_dates:
begin = datetime.combine(basedate, self.begin)
end = datetime.combine(basedate, self.end)
if end > begin: # Sanity check for malformed meetings in CX
ranges.append((begin, end))
return ranges
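    # Worked example for the weekday offsets above: the default base_date,
    # date(2012, 9, 3), is a Monday, so base_date.weekday() == 0 and
    #   Monday  -> (7 + 0 - 0) % 7 == 0 -> 2012-09-03
    #   Tuesday -> (7 + 1 - 0) % 7 == 1 -> 2012-09-04
    #   Friday  -> (7 + 4 - 0) % 7 == 4 -> 2012-09-07
    # With a base_date falling on a Wednesday (weekday() == 2) the Monday offset
    # would instead be (7 + 0 - 2) % 7 == 5, i.e. the following Monday.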
def get_campus(self):
return CAMPUSES[self.campus - 1][1] # CAMPUSES is now 1-based
def __unicode__(self):
return u'[%s] Meeting every %s, %s-%s' % (
self.section.code, ''.join(self.gen_days()), self.begin.strftime('%I:%M %p'), self.end.strftime('%I:%M %p'))
class Schedule(models.Model):
sections = models.ManyToManyField(Section)
create_ts = models.DateTimeField(default=datetime.now)
def json(self):
all_sections = []
for section in self.sections.all():
all_sections.append(section.json())
return all_sections
def json_encoded(self):
return json.dumps(self.json(), cls=DjangoJSONEncoder)
@models.permalink
def get_absolute_url(self):
return ('aspc.courses.views.view_schedule', (self.id,))
def outside_url(self):
return u''.join([settings.OUTSIDE_URL_BASE, self.get_absolute_url()])
def __unicode__(self):
return u'Schedule %i' % (self.id,)
class RefreshHistory(models.Model):
FULL = 0
REGISTRATION = 1
run_date = models.DateTimeField(default=datetime.now)
last_refresh_date = models.DateTimeField()
term = models.ForeignKey(Term, related_name='term')
type = models.IntegerField(choices=(
(FULL, 'Full'),
(REGISTRATION, 'Registration'),
))
def __unicode__(self):
return u"{0} refresh at {1}".format(self.get_type_display(), self.last_refresh_date.isoformat())
class Meta:
verbose_name_plural = 'refresh histories'
|
mit
| -5,253,931,363,524,595,000
| 33.355872
| 120
| 0.610317
| false
| 3.554492
| false
| false
| false
|
3324fr/spinalcordtoolbox
|
dev/control_points/make_centerline.py
|
1
|
3765
|
# Main function: returns the centerline of the NIfTI image fname as a binary NIfTI file
# The centerline is generated using sct_nurbs with nbControl = size/div
from sct_nurbs_v2 import *
import nibabel
import splines_approximation_v2 as spline_app
from scipy import ndimage
import numpy
import commands
import linear_fitting as lf
import sct_utils
def returnCenterline(fname = None, nurbs = 0, div = 0):
if fname == None:
fname = 't250_half_sup_straight_seg.nii.gz'
file = nibabel.load(fname)
data = file.get_data()
hdr_seg = file.get_header()
nx, ny, nz = spline_app.getDim(fname)
x = [0 for iz in range(0, nz, 1)]
y = [0 for iz in range(0, nz, 1)]
z = [iz for iz in range(0, nz, 1)]
for iz in range(0, nz, 1):
x[iz], y[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))
points = [[x[n],y[n],z[n]] for n in range(len(x))]
p1, p2, p3 = spline_app.getPxDimensions(fname)
size = spline_app.getSize(x, y, z, p1, p2, p3)
data = data*0
    if nurbs:
        # check_nurbs hands back the fitted NURBS curve, or 1 on instability
        P = check_nurbs(div, size, points)
        if P == 1:
            return 1
        x_centerline_fit = P[0]
        y_centerline_fit = P[1]
        z_centerline_fit = P[2]
        for i in range(len(z_centerline_fit)):
            data[int(round(x_centerline_fit[i])), int(round(y_centerline_fit[i])), int(z_centerline_fit[i])] = 1
else:
for i in range(len(z)) :
data[int(round(x[i])),int(round(y[i])),int(z[i])] = 1
path, file_name, ext_fname = sct_utils.extract_fname(fname)
img = nibabel.Nifti1Image(data, None, hdr_seg)
#return img
saveFile(file_name, img, div)
return size
def check_nurbs(div, size = 0, points = 0, centerline = ''):
if centerline == '':
print 'div = ',div,' size = ', round(size)
        # use the sct_nurbs_v2 constructor signature (matching the call in the
        # else-branch below) and hand the fitted curve back to the caller
        nurbs = NURBS(3, 3000, points, False, None, int(round(size)), int(div))
        P = nurbs.getCourbe3D()
        if P == 1:
            print "ERROR: instability in NURBS computation, div will be incremented. "
            return 1
        return P
else:
file = nibabel.load(centerline)
data = file.get_data()
hdr_seg = file.get_header()
nx, ny, nz = spline_app.getDim(centerline)
x = [0 for iz in range(0, nz, 1)]
y = [0 for iz in range(0, nz, 1)]
z = [iz for iz in range(0, nz, 1)]
for iz in range(0, nz, 1):
x[iz], y[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))
points = [[x[n],y[n],z[n]] for n in range(len(x))]
p1, p2, p3 = spline_app.getPxDimensions(centerline)
size = spline_app.getSize(x, y, z, p1, p2, p3)
print 'div = ',div,' size = ', round(size)
#nurbs = NURBS(int(round(size)), int(div), 3, 3000, points) --> this work with sct_nurbs_v1
try:
nurbs = NURBS(3, 3000, points, False, None, int(round(size)), int(div))
P = nurbs.getCourbe3D()
except UnboundLocalError:
print "ERROR: instability in NURBS computation, UnboundLocalError caught, div will be incremented. "
return 1
except ZeroDivisionError:
print "ERROR: instability in NURBS computation, ZeroDivisionError caught, div will be incremented. "
return 1
if P==1:
print "ERROR: instability in NURBS computation, div will be incremented. "
return 1
else: return round(size)
def saveFile(file_name, img, div):
path_centerline = './centerlines/'+file_name+'_'+str(div)+'_centerline.nii.gz'
nibabel.save(img,path_centerline)
#cmd = 'sct_straighten_spinalcord -i '+path_centerline+' -c '+fname
#print cmd
#commands.getstatusoutput(cmd)
#cmd = 'sct_propseg'
if __name__ == "__main__":
returnCenterline()
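# A hedged usage sketch (the file name is hypothetical): calling
#   returnCenterline('t2_seg.nii.gz', nurbs=1, div=10)
# fits the centerline with nbControl = size/10, writes the binary mask to
# ./centerlines/t2_seg_10_centerline.nii.gz and returns the estimated size.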
|
mit
| 7,278,184,712,280,968,000
| 31.188034
| 114
| 0.592297
| false
| 2.973934
| false
| false
| false
|
ceibal-tatu/sugar-toolkit-gtk3
|
src/sugar3/graphics/window.py
|
1
|
10947
|
# Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2009, Aleksey Lim, Sayamindu Dasgupta
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
STABLE.
"""
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import GdkX11
from gi.repository import Gtk
import warnings
from sugar3.graphics.icon import Icon
from sugar3.graphics import palettegroup
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT = 2
class UnfullscreenButton(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.set_decorated(False)
self.set_resizable(False)
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.set_border_width(0)
self.props.accept_focus = False
#Setup estimate of width, height
valid_, w, h = Gtk.icon_size_lookup(Gtk.IconSize.LARGE_TOOLBAR)
self._width = w
self._height = h
screen = self.get_screen()
screen.connect('size-changed', self._screen_size_changed_cb)
self._button = Gtk.Button()
self._button.set_relief(Gtk.ReliefStyle.NONE)
self._icon = Icon(icon_name='view-return',
icon_size=Gtk.IconSize.LARGE_TOOLBAR)
self._icon.show()
self._button.add(self._icon)
self._button.show()
self.add(self._button)
def connect_button_clicked(self, cb):
self._button.connect('clicked', cb)
def _reposition(self):
x = Gdk.Screen.width() - self._width
self.move(x, 0)
def do_get_preferred_width(self):
minimum, natural = Gtk.Window.do_get_preferred_width(self)
self._width = minimum
self._reposition()
return minimum, natural
def _screen_size_changed_cb(self, screen):
self._reposition()
class Window(Gtk.Window):
def __init__(self, **args):
self._enable_fullscreen_mode = True
GObject.GObject.__init__(self, **args)
self.set_decorated(False)
self.connect('realize', self.__window_realize_cb)
self.connect('key-press-event', self.__key_press_cb)
# OSK support: canvas auto panning based on input focus
if GObject.signal_lookup('request-clear-area', Window) != 0 and \
GObject.signal_lookup('unset-clear-area', Window) != 0:
self.connect('size-allocate', self.__size_allocate_cb)
self.connect('request-clear-area', self.__request_clear_area_cb)
self.connect('unset-clear-area', self.__unset_clear_area_cb)
self._clear_area_dy = 0
self._toolbar_box = None
self._alerts = []
self._canvas = None
self.tray = None
self.__vbox = Gtk.VBox()
self.__hbox = Gtk.HBox()
self.__vbox.pack_start(self.__hbox, True, True, 0)
self.__hbox.show()
self.add_events(Gdk.EventMask.POINTER_MOTION_HINT_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.TOUCH_MASK)
self.connect('motion-notify-event', self.__motion_notify_cb)
self.connect('button-release-event', self.__button_press_event_cb)
self.add(self.__vbox)
self.__vbox.show()
self._is_fullscreen = False
self._unfullscreen_button = UnfullscreenButton()
self._unfullscreen_button.set_transient_for(self)
self._unfullscreen_button.connect_button_clicked(
self.__unfullscreen_button_clicked)
self._unfullscreen_button_timeout_id = None
def reveal(self):
""" Make window active
In contrast with present(), brings window to the top
even after invoking on response on non-gtk events.
See #1423.
"""
window = self.get_window()
if window is None:
self.show()
return
timestamp = Gtk.get_current_event_time()
if not timestamp:
timestamp = GdkX11.x11_get_server_time(window)
window.focus(timestamp)
def fullscreen(self):
palettegroup.popdown_all()
if self._toolbar_box is not None:
self._toolbar_box.hide()
if self.tray is not None:
self.tray.hide()
self._is_fullscreen = True
if self.props.enable_fullscreen_mode:
self._unfullscreen_button.show()
if self._unfullscreen_button_timeout_id is not None:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
self._unfullscreen_button_timeout_id = \
GObject.timeout_add_seconds( \
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT, \
self.__unfullscreen_button_timeout_cb)
def unfullscreen(self):
if self._toolbar_box is not None:
self._toolbar_box.show()
if self.tray is not None:
self.tray.show()
self._is_fullscreen = False
if self.props.enable_fullscreen_mode:
self._unfullscreen_button.hide()
if self._unfullscreen_button_timeout_id:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
def set_canvas(self, canvas):
if self._canvas:
self.__hbox.remove(self._canvas)
if canvas:
self.__hbox.pack_start(canvas, True, True, 0)
self._canvas = canvas
self.__vbox.set_focus_child(self._canvas)
def get_canvas(self):
return self._canvas
canvas = property(get_canvas, set_canvas)
def get_toolbar_box(self):
return self._toolbar_box
def set_toolbar_box(self, toolbar_box):
if self._toolbar_box:
self.__vbox.remove(self._toolbar_box)
if toolbar_box:
self.__vbox.pack_start(toolbar_box, False, False, 0)
self.__vbox.reorder_child(toolbar_box, 0)
self._toolbar_box = toolbar_box
toolbar_box = property(get_toolbar_box, set_toolbar_box)
def set_tray(self, tray, position):
if self.tray:
box = self.tray.get_parent()
box.remove(self.tray)
if position == Gtk.PositionType.LEFT:
self.__hbox.pack_start(tray, False, False, 0)
elif position == Gtk.PositionType.RIGHT:
self.__hbox.pack_end(tray, False, False, 0)
elif position == Gtk.PositionType.BOTTOM:
self.__vbox.pack_end(tray, False, False, 0)
self.tray = tray
def add_alert(self, alert):
self._alerts.append(alert)
if len(self._alerts) == 1:
self.__vbox.pack_start(alert, False, False, 0)
if self._toolbar_box is not None:
self.__vbox.reorder_child(alert, 1)
else:
self.__vbox.reorder_child(alert, 0)
def remove_alert(self, alert):
if alert in self._alerts:
self._alerts.remove(alert)
# if the alert is the visible one on top of the queue
if alert.get_parent() is not None:
self.__vbox.remove(alert)
if len(self._alerts) >= 1:
self.__vbox.pack_start(self._alerts[0], False, False, 0)
if self._toolbar_box is not None:
self.__vbox.reorder_child(self._alerts[0], 1)
else:
                        self.__vbox.reorder_child(self._alerts[0], 0)
def __window_realize_cb(self, window):
group = Gtk.Window()
group.realize()
window.get_window().set_group(group.get_window())
def __key_press_cb(self, widget, event):
key = Gdk.keyval_name(event.keyval)
if event.get_state() & Gdk.ModifierType.MOD1_MASK:
if self.tray is not None and key == 'space':
self.tray.props.visible = not self.tray.props.visible
return True
elif key == 'Escape' and self._is_fullscreen and \
self.props.enable_fullscreen_mode:
self.unfullscreen()
return True
return False
def __unfullscreen_button_clicked(self, button):
self.unfullscreen()
def __button_press_event_cb(self, widget, event):
self._show_unfullscreen_button()
return False
def __motion_notify_cb(self, widget, event):
self._show_unfullscreen_button()
return False
def _show_unfullscreen_button(self):
if self._is_fullscreen and self.props.enable_fullscreen_mode:
if not self._unfullscreen_button.props.visible:
self._unfullscreen_button.show()
# Reset the timer
if self._unfullscreen_button_timeout_id is not None:
GObject.source_remove(self._unfullscreen_button_timeout_id)
self._unfullscreen_button_timeout_id = None
self._unfullscreen_button_timeout_id = \
GObject.timeout_add_seconds( \
_UNFULLSCREEN_BUTTON_VISIBILITY_TIMEOUT, \
self.__unfullscreen_button_timeout_cb)
def __unfullscreen_button_timeout_cb(self):
self._unfullscreen_button.hide()
self._unfullscreen_button_timeout_id = None
return False
def __request_clear_area_cb(self, activity, osk_rect, cursor_rect):
self._clear_area_dy = cursor_rect.y + cursor_rect.height - osk_rect.y
if self._clear_area_dy < 0:
self._clear_area_dy = 0
return False
self.queue_resize()
return True
def __unset_clear_area_cb(self, activity, snap_back):
self._clear_area_dy = 0
self.queue_resize()
return True
def __size_allocate_cb(self, widget, allocation):
self.set_allocation(allocation)
allocation.y -= self._clear_area_dy
self.__vbox.size_allocate(allocation)
def set_enable_fullscreen_mode(self, enable_fullscreen_mode):
self._enable_fullscreen_mode = enable_fullscreen_mode
def get_enable_fullscreen_mode(self):
return self._enable_fullscreen_mode
enable_fullscreen_mode = GObject.property(type=object,
setter=set_enable_fullscreen_mode, getter=get_enable_fullscreen_mode)
|
lgpl-2.1
| -8,212,694,695,301,188,000
| 32.787037
| 77
| 0.603636
| false
| 3.803683
| false
| false
| false
|
YuepengGuo/backtrader
|
docs/observers-and-statistics/observers-default.py
|
1
|
1323
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
import backtrader.feeds as btfeeds
if __name__ == '__main__':
cerebro = bt.Cerebro(stdstats=False)
cerebro.addstrategy(bt.Strategy)
data = bt.feeds.BacktraderCSVData(dataname='../../datas/2006-day-001.txt')
cerebro.adddata(data)
cerebro.run()
cerebro.plot()
|
gpl-3.0
| -4,085,089,415,937,773,000
| 36.8
| 79
| 0.628118
| false
| 4.021277
| false
| false
| false
|
mic4ael/indico
|
indico/util/suggestions.py
|
1
|
5843
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import division, print_function, unicode_literals
from collections import defaultdict
from datetime import date, timedelta
from sqlalchemy.orm import joinedload, load_only
from indico.modules.events import Event
from indico.modules.events.abstracts.util import get_events_with_abstract_persons
from indico.modules.events.contributions.util import get_events_with_linked_contributions
from indico.modules.events.registration.util import get_events_registered
from indico.modules.events.surveys.util import get_events_with_submitted_surveys
from indico.util.date_time import now_utc, utc_to_server
from indico.util.struct.iterables import window
def _get_blocks(events, attended):
blocks = []
block = []
for event in events:
if event not in attended:
if block:
blocks.append(block)
block = []
continue
block.append(event)
if block:
blocks.append(block)
return blocks
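# A minimal sketch of how _get_blocks splits a sequence (hypothetical events):
# unattended items act as separators between runs of attended events, e.g.
#   _get_blocks(['e1', 'e2', 'e3', 'e4', 'e5'], {'e1', 'e2', 'e4'})
#   == [['e1', 'e2'], ['e4']]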
def _query_categ_events(categ, start_dt, end_dt):
return (Event.query
.with_parent(categ)
.filter(Event.happens_between(start_dt, end_dt))
.options(load_only('id', 'start_dt', 'end_dt')))
def _get_category_score(user, categ, attended_events, debug=False):
if debug:
print(repr(categ))
# We care about events in the whole timespan where the user attended some events.
# However, this might result in some missed events e.g. if the user was not working for
# a year and then returned. So we throw away old blocks (or rather adjust the start time
# to the start time of the newest block)
first_event_date = attended_events[0].start_dt.replace(hour=0, minute=0)
last_event_date = attended_events[-1].start_dt.replace(hour=0, minute=0) + timedelta(days=1)
blocks = _get_blocks(_query_categ_events(categ, first_event_date, last_event_date), attended_events)
for a, b in window(blocks):
# More than 3 months between blocks? Ignore the old block!
if b[0].start_dt - a[-1].start_dt > timedelta(weeks=12):
first_event_date = b[0].start_dt.replace(hour=0, minute=0)
# Favorite categories get a higher base score
score = int(categ in user.favorite_categories)
if debug:
print('{0:+.3f} - initial'.format(score))
# Attendance percentage goes to the score directly. If the attendance is high chances are good that the user
    # is either very interested in whatever goes on in the category or it's something he has to attend regularly.
total = _query_categ_events(categ, first_event_date, last_event_date).count()
if total:
attended_block_event_count = sum(1 for e in attended_events if e.start_dt >= first_event_date)
score += attended_block_event_count / total
if debug:
print('{0:+.3f} - attendance'.format(score))
# If there are lots/few unattended events after the last attended one we also update the score with that
total_after = _query_categ_events(categ, last_event_date + timedelta(days=1), None).count()
if total_after < total * 0.05:
score += 0.25
elif total_after > total * 0.25:
score -= 0.5
if debug:
print('{0:+.3f} - unattended new events'.format(score))
# Lower the score based on how long ago the last attended event was if there are no future events
    # We only start applying this modifier once the event is more than 40 days in the past, to prevent
    # it from kicking in for monthly events that are not created early enough.
days_since_last_event = (date.today() - last_event_date.date()).days
if days_since_last_event > 40:
score -= 0.025 * days_since_last_event
if debug:
print('{0:+.3f} - days since last event'.format(score))
# For events in the future however we raise the score
now_local = utc_to_server(now_utc())
attending_future = (_query_categ_events(categ, now_local, last_event_date)
.filter(Event.id.in_(e.id for e in attended_events))
.all())
if attending_future:
score += 0.25 * len(attending_future)
if debug:
print('{0:+.3f} - future event count'.format(score))
days_to_future_event = (attending_future[0].start_dt.date() - date.today()).days
score += max(0.1, -(max(0, days_to_future_event - 2) / 4) ** (1 / 3) + 2.5)
if debug:
print('{0:+.3f} - days to next future event'.format(score))
return score
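# Rough shape of the proximity bonus above, computed by hand (true division is
# in effect via the __future__ import): 0-2 days until the nearest attended
# future event gives the full +2.5, 6 days gives +1.5, 10 days about +1.24,
# 30 days about +0.59, and beyond roughly 57 days it is clamped to the +0.1 floor.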
def get_category_scores(user, debug=False):
# XXX: check if we can add some more roles such as 'contributor' to assume attendance
event_ids = set()
event_ids.update(id_
for id_, roles in get_events_with_abstract_persons(user).iteritems()
if 'abstract_submitter' in roles)
event_ids.update(id_
for id_, roles in get_events_with_linked_contributions(user).iteritems()
if 'contribution_submission' in roles)
event_ids |= get_events_registered(user)
event_ids |= get_events_with_submitted_surveys(user)
if not event_ids:
return {}
attended = (Event.query
.filter(Event.id.in_(event_ids), ~Event.is_deleted)
.options(joinedload('category'))
.order_by(Event.start_dt, Event.id)
.all())
categ_events = defaultdict(list)
for event in attended:
categ_events[event.category].append(event)
return dict((categ, _get_category_score(user, categ, events, debug))
for categ, events in categ_events.iteritems())
|
mit
| 1,942,233,109,239,338,800
| 45.007874
| 114
| 0.655827
| false
| 3.638232
| false
| false
| false
|
coreequip/xbmc-addon-service-watchedlist
|
service.py
|
1
|
62979
|
"""
This file contains the class of the addon
Settings for this addon:
w_movies
'true', 'false': save watched state of movies
w_episodes
    'true', 'false': save watched state of episodes
autostart
    'true', 'false': start the watched-state service automatically with xbmc
delay
delay after startup in minutes: '0', '5', '10', ...
starttype
'0' = No autostart
'1' = One Execution after xbmc start
'2' = Periodic start of the addon
interval
    hours between two periodic executions: '1', '2', ...
watch_user
    'true', 'false': monitor watched-state changes made by the user
progressdialog
    'true', 'false': show a progress dialogue while updating the databases
db_format
'0' = SQLite File
'1' = MYSQL Server
extdb
'true', 'false': Use external database file
dbpath
String: Specify path to external database file
dbfilename
    String: file name of the external database file
dbbackup
    'true', 'false': create a zip backup of the database file before writing changes
mysql_server
mysql_port
mysql_db
mysql_user
mysql_pass
"""
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
import re
import sys, os
import unicodedata
import time
import sqlite3
import mysql.connector
import buggalo
buggalo.GMAIL_RECIPIENT = "msahadl60@gmail.com"
# buggalo.SUBMIT_URL = 'http://msahadl.ms.funpic.de/buggalo-web/submit.php'
import resources.lib.utils as utils
if utils.getSetting('dbbackup') == 'true':
import zipfile
import datetime
#
class WatchedList:
"""
Main class of the add-on
"""
def __init__(self):
"""
Initialize Class, default values for all class variables
"""
self.watchedmovielist_wl = list([]) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6lastChange
self.watchedepisodelist_wl = list([]) # 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5empty, 6lastChange
self.watchedmovielist_xbmc = list([]) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
self.watchedepisodelist_xbmc = list([]) # 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5name, 6empty, 7episodeid
self.tvshows = {} # dict: key=xbmcid, value=[imdbnumber, showname]
self.tvshownames = {} #dict: key=imdbnumber, value=showname
self.sqlcon = 0
self.sqlcursor = 0
self.db_method = 'file' # either 'file' or 'mysql'
# flag to remember copying the databasefile if requested
self.dbcopydone = False
self.watch_user_changes_count = 0
# normal access of files or access over the xbmc virtual file system (on unix)
self.dbfileaccess = 'normal'
self.dbpath = ''
self.dbdirectory = ''
def runProgram(self):
"""Main function to call other functions
infinite loop for periodic database update
Returns:
return codes:
0 success
3 error/exit
"""
try:
# workaround to disable autostart, if requested
if utils.getSetting("autostart") == 'false':
return 0
utils.buggalo_extradata_settings()
utils.footprint()
# wait the delay time after startup
delaytime = float(utils.getSetting("delay")) * 60 # in seconds
utils.log(u'Delay time before execution: %d seconds' % delaytime, xbmc.LOGDEBUG)
utils.showNotification(utils.getString(32101), utils.getString(32004)%float(utils.getSetting("delay")))
if utils.sleepsafe(delaytime):
return 0
# load all databases
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
utils.showNotification(utils.getString(32102), utils.getString(32601))
return 3
if len(self.tvshownames) == 0: self.sync_tvshows()
if len(self.watchedmovielist_wl) == 0: self.get_watched_wl(1)
if len(self.watchedmovielist_xbmc) == 0: self.get_watched_xbmc(1)
executioncount = 0
idletime = 0
if utils.getSetting("watch_user") == 'true': utils.showNotification(utils.getString(32101), utils.getString(32005))
# handle the periodic execution
while float(utils.getSetting("starttype")) > 0 or utils.getSetting("watch_user") == 'true':
starttime = time.time()
# determine sleeptime before next full watched-database update
if utils.getSetting("starttype") == '1' and executioncount == 0: # one execution after startup
sleeptime = 0
elif utils.getSetting("starttype") == '2': # periodic execution
if executioncount == 0: # with periodic execution, one update after startup and then periodic
sleeptime = 0
else:
sleeptime = float(utils.getSetting("interval")) * 3600 # wait interval until next startup in [seconds]
# wait and then update again
utils.log(u'wait %d seconds until next update' % sleeptime)
utils.showNotification(utils.getString(32101), utils.getString(32003)%(sleeptime/3600))
else: # no autostart, only watch user
sleeptime = 3600 # arbitrary time for infinite loop
# workaround to sleep the requested time. When using the sleep-function, xbmc can not exit
while 1:
if xbmc.abortRequested: return 1
# check if user changes arrived
if utils.getSetting("watch_user") == 'true':
idletime_old = idletime
idletime = xbmc.getGlobalIdleTime() # xbmc idletime in seconds
# check if user could have made changes and process these changes to the wl database
self.watch_user_changes(idletime_old, idletime)
# check if time for update arrived
if time.time() > starttime + sleeptime:
break
xbmc.sleep(1000) # wait 1 second until next check if xbmc terminates
# perform full update
if utils.getSetting("starttype") == '1' and executioncount == 0 or utils.getSetting("starttype") == '2':
self.runUpdate(False)
executioncount += 1
# check for exiting program
if float(utils.getSetting("starttype")) < 2 and utils.getSetting("watch_user") == 'false':
return 0 # the program may exit. No purpose for background process
return 0
except:
buggalo.onExceptionRaised()
def runUpdate(self, manualstart):
"""entry point for manual start.
perform the update step by step
Args:
manualstart: True if called manually
Returns:
return code:
0 success
3 Error opening database
4 Error getting watched state from addon database
5 Error getting watched state from xbmc database
6 Error writing WL Database
7 Error writing XBMC database
"""
try:
utils.buggalo_extradata_settings()
# check if player is running before doing the update. Only do this check for automatic start
while xbmc.Player().isPlaying() == True and not manualstart:
if utils.sleepsafe(60*1000): return 1 # wait one minute until next check for active playback
if xbmc.Player().isPlaying() == False:
if utils.sleepsafe(180*1000): return 1 # wait 3 minutes so the dialogue does not pop up directly after the playback ends
# load the addon-database
if self.load_db(True): # True: Manual start
utils.showNotification(utils.getString(32102), utils.getString(32601))
return 3
if self.sync_tvshows():
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 5
# get the watched state from the addon
if self.get_watched_wl(0):
utils.showNotification(utils.getString(32102), utils.getString(32602))
return 4
# get watched state from xbmc
if self.get_watched_xbmc(0):
utils.showNotification(utils.getString(32102), utils.getString(32603))
return 5
if self.sync_tvshows():
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 5
# import from xbmc into addon database
res = self.write_wl_wdata()
if res == 2: # user exit
return 0
elif res == 1: # error
utils.showNotification(utils.getString(32102), utils.getString(32604))
return 6
# close the sqlite database (addon)
self.close_db() # should be closed by the functions directly accessing the database
# export from addon database into xbmc database
res = self.write_xbmc_wdata((utils.getSetting("progressdialog") == 'true'), 2)
if res == 2: # user exit
return 0
elif res == 1: # error
utils.showNotification(utils.getString(32102), utils.getString(32605))
return 7
utils.showNotification(utils.getString(32101), utils.getString(32107))
utils.log(u'runUpdate exited with success', xbmc.LOGDEBUG)
return 0
except:
buggalo.onExceptionRaised()
def load_db(self, manualstart=False):
"""Load WL database
Args:
manualstart: True if called manually; only retry opening db once
Returns:
return code:
0 successfully opened database
1 error
2 shutdown (serious error in subfunction)
"""
try:
if utils.getSetting("db_format") == '0':
# SQlite3 database in a file
# load the db path
if utils.getSetting("extdb") == 'false':
# use the default file
self.dbdirectory = xbmc.translatePath( utils.data_dir() ).decode('utf-8')
buggalo.addExtraData('dbdirectory', self.dbdirectory);
self.dbpath = os.path.join( self.dbdirectory , "watchedlist.db" )
else:
wait_minutes = 1 # retry waittime if db path does not exist/ is offline
while xbmc.abortRequested == False:
# use a user specified file, for example to synchronize multiple clients
self.dbdirectory = xbmc.translatePath( utils.getSetting("dbpath") ).decode('utf-8')
self.dbfileaccess = utils.fileaccessmode(self.dbdirectory)
self.dbdirectory = utils.translateSMB(self.dbdirectory)
self.dbpath = os.path.join( self.dbdirectory , utils.getSetting("dbfilename").decode('utf-8') )
# xbmc.validatePath(self.dbdirectory) # does not work for smb
if not xbmcvfs.exists(self.dbdirectory): # do not use os.path.exists to access smb:// paths
if manualstart:
utils.log(u'db path does not exist: %s' % self.dbdirectory, xbmc.LOGWARNING)
return 1 # error
else:
utils.log(u'db path does not exist, wait %d minutes: %s' % (wait_minutes, self.dbdirectory), xbmc.LOGWARNING)
utils.showNotification(utils.getString(32102), utils.getString(32002) % self.dbdirectory )
# Wait "wait_minutes" minutes until next check for file path (necessary on network shares, that are offline)
wait_minutes += wait_minutes # increase waittime until next check
if utils.sleepsafe(wait_minutes*60): return 2
else:
break # directory exists, continue below
# on unix, smb-shares can not be accessed with sqlite3 --> copy the db with xbmc file system operations and work in mirror directory
buggalo.addExtraData('dbfileaccess', self.dbfileaccess);
buggalo.addExtraData('dbdirectory', self.dbdirectory);
buggalo.addExtraData('dbpath', self.dbpath);
if self.dbfileaccess == 'copy':
self.dbdirectory_copy = self.dbdirectory
self.dbpath_copy = self.dbpath # path to db file as in the settings (but not accessable)
buggalo.addExtraData('dbdirectory_copy', self.dbdirectory_copy);
buggalo.addExtraData('dbpath_copy', self.dbpath_copy);
# work in copy directory in the xbmc profile folder
self.dbdirectory = os.path.join( xbmc.translatePath( utils.data_dir() ).decode('utf-8'), 'dbcopy')
if not xbmcvfs.exists(self.dbdirectory):
xbmcvfs.mkdir(self.dbdirectory)
utils.log(u'created directory %s' % str(self.dbdirectory))
self.dbpath = os.path.join( self.dbdirectory , "watchedlist.db" )
if xbmcvfs.exists(self.dbpath_copy):
success = xbmcvfs.copy(self.dbpath_copy, self.dbpath) # copy the external db file to local mirror directory
utils.log(u'copied db file %s -> %s. Success: %d' % (self.dbpath_copy, self.dbpath, success), xbmc.LOGDEBUG)
buggalo.addExtraData('dbdirectory', self.dbdirectory);
buggalo.addExtraData('dbpath', self.dbpath);
#connect to the database. create database if it does not exist
self.sqlcon = sqlite3.connect(self.dbpath);
self.sqlcursor = self.sqlcon.cursor()
else:
# MySQL Database on a server
self.sqlcon = mysql.connector.connect(user=utils.getSetting("mysql_user"), password=utils.getSetting("mysql_pass"), database=utils.getSetting("mysql_db"), host=utils.getSetting("mysql_server"), port=utils.getSetting("mysql_port"))
self.sqlcursor = self.sqlcon.cursor()
# create tables if they don't exist
if utils.getSetting("db_format") == '0': # sqlite file
sql = "CREATE TABLE IF NOT EXISTS movie_watched (idMovieImdb INTEGER PRIMARY KEY,playCount INTEGER,lastChange INTEGER,lastPlayed INTEGER,title TEXT)"
self.sqlcursor.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS episode_watched (idShow INTEGER, season INTEGER, episode INTEGER, playCount INTEGER,lastChange INTEGER,lastPlayed INTEGER, PRIMARY KEY (idShow, season, episode))"
self.sqlcursor.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS tvshows (idShow INTEGER, title TEXT, PRIMARY KEY (idShow))"
self.sqlcursor.execute(sql)
else: # mysql network database
sql = ("CREATE TABLE IF NOT EXISTS `movie_watched` ("
"`idMovieImdb` int unsigned NOT NULL,"
"`playCount` tinyint unsigned DEFAULT NULL,"
"`lastChange` timestamp NULL DEFAULT NULL,"
"`lastPlayed` timestamp NULL DEFAULT NULL,"
"`title` text,"
"PRIMARY KEY (`idMovieImdb`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
sql = ("CREATE TABLE IF NOT EXISTS `episode_watched` ("
"`idShow` int unsigned NOT NULL DEFAULT '0',"
"`season` smallint unsigned NOT NULL DEFAULT '0',"
"`episode` smallint unsigned NOT NULL DEFAULT '0',"
"`playCount` tinyint unsigned DEFAULT NULL,"
"`lastChange` timestamp NULL DEFAULT NULL,"
"`lastPlayed` timestamp NULL DEFAULT NULL,"
"PRIMARY KEY (`idShow`,`season`,`episode`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
sql = ("CREATE TABLE IF NOT EXISTS `tvshows` ("
"`idShow` int unsigned NOT NULL,"
"`title` text,"
"PRIMARY KEY (`idShow`)"
") ENGINE=InnoDB DEFAULT CHARSET=utf8;")
self.sqlcursor.execute(sql)
buggalo.addExtraData('db_connstatus', 'connected')
except sqlite3.Error as e:
try:
                errstring = e.args[0]  # TODO: find out why this does not work sometimes
except:
errstring = ''
utils.log(u"Database error while opening %s. '%s'" % (self.dbpath, errstring), xbmc.LOGERROR)
self.close_db()
buggalo.addExtraData('db_connstatus', 'sqlite3 error, closed')
return 1
except mysql.connector.Error as err:
# Catch common mysql errors and show them to guide the user
utils.log(u"Database error while opening mySQL DB %s [%s:%s@%s]. %s" % (utils.getSetting("mysql_db"), utils.getSetting("mysql_user"), utils.getSetting("mysql_pass"), utils.getSetting("mysql_db"), err), xbmc.LOGERROR)
if err.errno == mysql.connector.errorcode.ER_DBACCESS_DENIED_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32210) % (utils.getSetting("mysql_user"), utils.getSetting("mysql_db")))
elif err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32208))
elif err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
utils.showNotification(utils.getString(32103), utils.getString(32209) % utils.getSetting("mysql_db") )
buggalo.addExtraData('db_connstatus', 'mysql error, closed')
self.close_db()
return 1
except:
utils.log(u"Error while opening %s: %s" % (self.dbpath, sys.exc_info()[2]), xbmc.LOGERROR)
self.close_db()
buggalo.addExtraData('dbpath', self.dbpath)
buggalo.addExtraData('db_connstatus', 'error, closed')
buggalo.onExceptionRaised()
return 1
# only commit the changes if no error occured to ensure database persistence
self.sqlcon.commit()
return 0
def close_db(self):
"""Close WL database
Returns:
return code:
0 successfully closed database
1 error
"""
if self.sqlcon:
self.sqlcon.close()
self.sqlcon = 0
# copy the db file back to the shared directory, if needed
if utils.getSetting("db_format") == '0' and self.dbfileaccess == 'copy':
if xbmcvfs.exists(self.dbpath):
success = xbmcvfs.copy(self.dbpath, self.dbpath_copy)
utils.log(u'copied db file %s -> %s. Success: %d' % (self.dbpath, self.dbpath_copy, success), xbmc.LOGDEBUG)
if not success:
utils.showNotification(utils.getString(32102), utils.getString(32606) % self.dbpath )
return 1
buggalo.addExtraData('db_connstatus', 'closed')
return 0
# cursor is not changed -> error
def get_watched_xbmc(self, silent):
"""Get Watched States of XBMC Database
Args:
silent: Do not show notifications if True
Returns:
return code:
0 success
1 error
"""
try:
############################################
# first tv shows with TheTVDB-ID, then tv episodes
if utils.getSetting("w_episodes") == 'true':
############################################
# get imdb tv-show id from xbmc database
utils.log(u'get_watched_xbmc: Get all episodes from xbmc database', xbmc.LOGDEBUG)
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetTVShows",
"params": {
"properties": ["title", "imdbnumber"],
"sort": { "order": "ascending", "method": "title" }
},
"id": 1})
if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key('tvshows'):
for item in json_response['result']['tvshows']:
tvshowId_xbmc = int(item['tvshowid'])
try:
# check if series number is in imdb-format (scraper=imdb?)
res = re.compile('tt(\d+)').findall(item['imdbnumber'])
if len(res) == 0:
# number in thetvdb-format
tvshowId_imdb = int(item['imdbnumber'])
else:
# number in imdb-format
tvshowId_imdb = int(res[0])
except:
utils.log(u'get_watched_xbmc: tv show "%s" has no imdb-number in database. tvshowid=%d Try rescraping.' % (item['title'], tvshowId_xbmc), xbmc.LOGDEBUG)
continue
self.tvshows[tvshowId_xbmc] = list([tvshowId_imdb, item['title']])
self.tvshownames[tvshowId_imdb] = item['title']
# Get all watched movies and episodes by unique id from xbmc-database via JSONRPC
self.watchedmovielist_xbmc = list([])
self.watchedepisodelist_xbmc = list([])
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'get_watched_xbmc: Get all %ss from xbmc database' % modus, xbmc.LOGDEBUG)
if modus == 'movie':
# use the JSON-RPC to access the xbmc-database.
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovies",
"params": {
"properties": ["title", "year", "imdbnumber", "lastplayed", "playcount"],
"sort": { "order": "ascending", "method": "title" }
},
"id": 1
})
else:
json_response = utils.executeJSON({
"jsonrpc": "2.0",
"method": "VideoLibrary.GetEpisodes",
"params": {
"properties": ["tvshowid", "season", "episode", "playcount", "showtitle", "lastplayed"]
},
"id": 1
})
if modus == 'movie': searchkey = 'movies'
else: searchkey = 'episodes'
if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key(searchkey):
# go through all watched movies and save them in the class-variable self.watchedmovielist_xbmc
for item in json_response['result'][searchkey]:
if modus == 'movie':
name = item['title'] + ' (' + str(item['year']) + ')'
res = re.compile('tt(\d+)').findall(item['imdbnumber'])
if len(res) == 0:
# no imdb-number for this movie in database. Skip
utils.log(u'get_watched_xbmc: Movie %s has no imdb-number in database. movieid=%d Try rescraping' % (name, int(item['movieid'])), xbmc.LOGDEBUG)
continue
imdbId = int(res[0])
else: # episodes
tvshowId_xbmc = item['tvshowid']
name = '%s S%02dE%02d' % (item['showtitle'], item['season'], item['episode'])
try:
tvshowId_imdb = self.tvshows[tvshowId_xbmc][0]
except:
utils.log(u'get_watched_xbmc: xbmc tv showid %d is not in table xbmc-tvshows. Skipping %s' % (item['tvshowid'], name), xbmc.LOGWARNING)
continue
lastplayed = utils.sqlDateTimeToTimeStamp(item['lastplayed']) # convert to integer-timestamp
playcount = int(item['playcount'])
# add data to the class-variables
if modus == 'movie':
self.watchedmovielist_xbmc.append(list([imdbId, 0, 0, lastplayed, playcount, name, 0, int(item['movieid'])]))# 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
else:
self.watchedepisodelist_xbmc.append(list([tvshowId_imdb, int(item['season']), int(item['episode']), lastplayed, playcount, name, 0, int(item['episodeid'])]))
if not silent: utils.showNotification( utils.getString(32101), utils.getString(32299)%(len(self.watchedmovielist_xbmc), len(self.watchedepisodelist_xbmc)) )
return 0
except:
utils.log(u'get_watched_xbmc: error getting the xbmc database : %s' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
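    # Example of the id handling above (hypothetical ids): for a tv show scraped
    # with the IMDb scraper, 'tt0096697' matches tt(\d+) and becomes the integer
    # 96697, while a plain TheTVDB id such as '71663' has no match and is simply
    # converted with int(); movies without a tt-style id are skipped entirely.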
def get_watched_wl(self, silent):
"""Get Watched States of WL Database
Args:
silent: Do not show notifications if True
Returns:
return code:
0 successfully got watched states from WL-database
1 unknown error (programming related)
2 shutdown (error in subfunction)
3 error related to opening the database
"""
try:
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
# get watched movies from addon database
self.watchedmovielist_wl = list([])
if utils.getSetting("w_movies") == 'true':
utils.log(u'get_watched_wl: Get watched movies from WL database', xbmc.LOGDEBUG)
if utils.getSetting("db_format") == '0': # SQLite3 File. Timestamp stored as integer
self.sqlcursor.execute("SELECT idMovieImdb, lastPlayed, playCount, title, lastChange FROM movie_watched ORDER BY title")
else: # mySQL: Create integer timestamp with the request
self.sqlcursor.execute("SELECT `idMovieImdb`, UNIX_TIMESTAMP(`lastPlayed`), `playCount`, `title`, UNIX_TIMESTAMP(`lastChange`) FROM `movie_watched` ORDER BY `title`")
rows = self.sqlcursor.fetchall()
for row in rows:
self.watchedmovielist_wl.append(list([int(row[0]), 0, 0, int(row[1]), int(row[2]), row[3], int(row[4])])) # 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6lastChange
# get watched episodes from addon database
self.watchedepisodelist_wl = list([])
if utils.getSetting("w_episodes") == 'true':
utils.log(u'get_watched_wl: Get watched episodes from WL database', xbmc.LOGDEBUG)
if utils.getSetting("db_format") == '0': # SQLite3 File. Timestamp stored as integer
self.sqlcursor.execute("SELECT idShow, season, episode, lastPlayed, playCount, lastChange FROM episode_watched ORDER BY idShow, season, episode")
else: # mySQL: Create integer timestamp with the request
self.sqlcursor.execute("SELECT `idShow`, `season`, `episode`, UNIX_TIMESTAMP(`lastPlayed`), `playCount`, UNIX_TIMESTAMP(`lastChange`) FROM `episode_watched` ORDER BY `idShow`, `season`, `episode`")
rows = self.sqlcursor.fetchall()
for row in rows:
try:
name = '%s S%02dE%02d' % (self.tvshownames[int(row[0])], int(row[1]), int(row[2]))
except:
name = 'tvdb-id %d S%02dE%02d' % (int(row[0]), int(row[1]), int(row[2]))
self.watchedepisodelist_wl.append(list([int(row[0]), int(row[1]), int(row[2]), int(row[3]), int(row[4]), name, int(row[5])]))# 0imdbnumber, 1season, 2episode, 3lastplayed, 4playcount, 5name, 6lastChange
if not silent: utils.showNotification(utils.getString(32101), utils.getString(32298)%(len(self.watchedmovielist_wl), len(self.watchedepisodelist_wl)))
self.close_db()
return 0
except sqlite3.Error as e:
try:
                errstring = e.args[0]  # TODO: find out why this does not work sometimes
except:
errstring = ''
utils.log(u'get_watched_wl: SQLite Database error getting the wl database. %s' % errstring, xbmc.LOGERROR)
self.close_db()
# error could be that the database is locked (for tv show strings). This is not an error to disturb the other functions
return 3
except mysql.connector.Error as err:
utils.log(u'get_watched_wl: MySQL Database error getting the wl database. %s' % err, xbmc.LOGERROR)
return 3
except:
utils.log(u'get_watched_wl: Error getting the wl database : %s' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
def sync_tvshows(self):
"""Sync List of TV Shows between WL and XBMC Database
Returns:
return code:
0 successfully synched tv shows
1 database access error
2 database loading error
"""
try:
utils.log(u'sync_tvshows: sync tvshows with wl database : %s' % sys.exc_info()[2], xbmc.LOGDEBUG)
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
# write eventually new tv shows to wl database
for xbmcid in self.tvshows:
if utils.getSetting("db_format") == '0': # sqlite3
sql = "INSERT OR IGNORE INTO tvshows (idShow,title) VALUES (?, ?)"
else: # mysql
sql = "INSERT IGNORE INTO tvshows (idShow,title) VALUES (%s, %s)"
values = self.tvshows[xbmcid]
self.sqlcursor.execute(sql, values)
self.database_copy()
self.sqlcon.commit()
# get all known tv shows from wl database
self.sqlcursor.execute("SELECT idShow, title FROM tvshows")
rows = self.sqlcursor.fetchall()
for i in range(len(rows)):
self.tvshownames[int(rows[i][0])] = rows[i][1]
self.close_db()
except sqlite3.Error as e:
try:
                errstring = e.args[0]  # TODO: find out why this does not work sometimes
except:
errstring = ''
utils.log(u'sync_tvshows: SQLite Database error accessing the wl database: ''%s''' % errstring, xbmc.LOGERROR)
self.close_db()
# error could be that the database is locked (for tv show strings).
return 1
except mysql.connector.Error as err:
utils.log(u"sync_tvshows: MySQL Database error accessing the wl database: ''%s''" % (err), xbmc.LOGERROR)
self.close_db()
return 1
except:
utils.log(u'sync_tvshows: Error getting the wl database: ''%s''' % sys.exc_info()[2], xbmc.LOGERROR)
self.close_db()
buggalo.onExceptionRaised()
return 1
return 0
def write_wl_wdata(self):
"""Go through all watched movies from xbmc and check whether they are up to date in the addon database
Returns:
return code:
0 successfully written WL
1 program exception
2 database loading error
"""
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'write_wl_wdata: Write watched %ss to WL database' % modus, xbmc.LOGDEBUG)
count_insert = 0
count_update = 0
if utils.getSetting("progressdialog") == 'true':
DIALOG_PROGRESS = xbmcgui.DialogProgress()
DIALOG_PROGRESS.create( utils.getString(32101) , utils.getString(32105))
if modus == 'movie':
list_length = len(self.watchedmovielist_xbmc)
else:
list_length = len(self.watchedepisodelist_xbmc)
for i in range(list_length):
if xbmc.abortRequested: break # this loop can take some time in debug mode and prevents xbmc exit
if utils.getSetting("progressdialog") == 'true' and DIALOG_PROGRESS.iscanceled():
if modus == 'movie': strno = 32202
else: strno = 32203;
utils.showNotification(utils.getString(strno), utils.getString(32301)%(count_insert, count_update))
return 2
if modus == 'movie':
row_xbmc = self.watchedmovielist_xbmc[i]
else:
row_xbmc = self.watchedepisodelist_xbmc[i]
if utils.getSetting("progressdialog") == 'true':
DIALOG_PROGRESS.update(100*(i+1)/list_length, utils.getString(32105), utils.getString(32610) % (i+1, list_length, row_xbmc[5]) )
try:
count = self.wl_update_media(modus, row_xbmc, 0, 0)
count_insert += count[0]; count_update += count[1];
except sqlite3.Error as e:
try:
                    errstring = e.args[0]  # TODO: find out why this does not work sometimes
except:
errstring = ''
utils.log(u'write_wl_wdata: SQLite Database error ''%s'' while updating %s %s' % (errstring, modus, row_xbmc[5]), xbmc.LOGERROR)
# error at this place is the result of duplicate movies, which produces a DUPLICATE PRIMARY KEY ERROR
return 1
except mysql.connector.Error as err:
utils.log(u'write_wl_wdata: MySQL Database error ''%s'' while updating %s %s' % (err, modus, row_xbmc[5]), xbmc.LOGERROR)
self.close_db()
return 1 # error while writing. Do not continue with episodes, if movies raised an exception
except:
utils.log(u'write_wl_wdata: Error while updating %s %s: %s' % (modus, row_xbmc[5], sys.exc_info()[2]), xbmc.LOGERROR)
self.close_db()
if utils.getSetting("progressdialog") == 'true': DIALOG_PROGRESS.close()
buggalo.addExtraData('count_update', count_update); buggalo.addExtraData('count_insert', count_insert);
buggalo.onExceptionRaised()
return 1
if utils.getSetting("progressdialog") == 'true': DIALOG_PROGRESS.close()
# only commit the changes if no error occured to ensure database persistence
if count_insert > 0 or count_update > 0:
self.database_copy()
self.sqlcon.commit()
if modus == 'movie': strno = [32202, 32301]
else: strno = [32203, 32301];
utils.showNotification(utils.getString(strno[0]), utils.getString(strno[1])%(count_insert, count_update))
self.close_db()
return 0
def write_xbmc_wdata(self, progressdialogue, notifications):
"""Go through all watched movies/episodes from the wl-database and check,
if the xbmc-database is up to date
Args:
progressdialogue: Show Progress Bar if True
notifications: 0= no, 1=only changed info, 2=all
Returns:
return code:
0 successfully written XBMC database
1 program exception
2 cancel by user interaction
"""
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
utils.log(u'write_xbmc_wdata: Write watched %ss to xbmc database (pd=%d, noti=%d)' % (modus, progressdialogue, notifications), xbmc.LOGDEBUG)
count_update = 0
if progressdialogue:
DIALOG_PROGRESS = xbmcgui.DialogProgress()
DIALOG_PROGRESS.create( utils.getString(32101), utils.getString(32106))
# list to iterate over
if modus == 'movie':
list_length = len(self.watchedmovielist_wl)
else:
list_length = len(self.watchedepisodelist_wl)
# iterate over wl-list
for j in range(list_length):
if xbmc.abortRequested: break # this loop can take some time in debug mode and prevents xbmc exit
if progressdialogue and DIALOG_PROGRESS.iscanceled():
if notifications > 0: utils.showNotification(utils.getString(32204), utils.getString(32302)%(count_update))
return 2
# get media-specific list items
if modus == 'movie':
row_wl = self.watchedmovielist_wl[j]
else:
row_wl = self.watchedepisodelist_wl[j]
season = row_wl[1]
episode = row_wl[2]
imdbId = row_wl[0]
name = row_wl[5]
if progressdialogue:
DIALOG_PROGRESS.update(100*(j+1)/list_length, utils.getString(32106), utils.getString(32610) % (j+1, list_length, name) )
try:
# search the unique movie/episode id in the xbmc-list
if modus == 'movie':
indices = [i for i, x in enumerate(self.watchedmovielist_xbmc) if x[0] == imdbId] # the movie can have multiple entries in xbmc
else:
indices = [i for i, x in enumerate(self.watchedepisodelist_xbmc) if x[0] == imdbId and x[1] == season and x[2] == episode]
lastplayed_wl = row_wl[3]
playcount_wl = row_wl[4]
lastchange_wl = row_wl[6]
if len(indices) > 0:
# the movie/episode is already in the xbmc-list
for i in indices:
if modus == 'movie':
row_xbmc = self.watchedmovielist_xbmc[i]
else:
row_xbmc = self.watchedepisodelist_xbmc[i]
lastplayed_xbmc = row_xbmc[3]
playcount_xbmc = row_xbmc[4]
change_xbmc_db = False
# check if movie/episode is set as unwatched in the wl database
if playcount_wl != playcount_xbmc and lastchange_wl > lastplayed_xbmc:
change_xbmc_db = True
# compare playcount and lastplayed (update if xbmc data is older)
if playcount_xbmc < playcount_wl or (lastplayed_xbmc < lastplayed_wl and playcount_wl > 0):
change_xbmc_db = True
if not change_xbmc_db:
if utils.getSetting("debug") == 'true':
# utils.log(u'write_xbmc_wdata: xbmc database up-to-date for tt%d, %s' % (imdbId, row_xbmc[2]), xbmc.LOGDEBUG)
pass
continue
# check if the lastplayed-timestamp in wl is useful
if playcount_wl == 0:
lastplayed_new = 0
else:
if lastplayed_wl == 0:
lastplayed_new = lastplayed_xbmc
else:
lastplayed_new = lastplayed_wl
# update database
mediaid = row_xbmc[7]
if modus == 'movie': jsonmethod = "VideoLibrary.SetMovieDetails"; idfieldname = "movieid"
else: jsonmethod = "VideoLibrary.SetEpisodeDetails"; idfieldname = "episodeid"
jsondict = {
"jsonrpc": "2.0",
"method": jsonmethod,
"params": {idfieldname: mediaid, "playcount": playcount_wl, "lastplayed": utils.TimeStamptosqlDateTime(lastplayed_new)},
"id": 1
}
json_response = utils.executeJSON(jsondict)
if (json_response.has_key('result') and json_response['result'] == 'OK'):
utils.log(u'write_xbmc_wdata: xbmc database updated for %s. playcount: {%d -> %d}, lastplayed: {"%s" -> "%s"} (%sid=%d)' % (name, playcount_xbmc, playcount_wl, utils.TimeStamptosqlDateTime(lastplayed_xbmc), utils.TimeStamptosqlDateTime(lastplayed_new), modus, mediaid), xbmc.LOGINFO)
if utils.getSetting("debug") == 'true':
if playcount_wl == 0:
if notifications > 0: utils.showNotification(utils.getString(32404), name)
else:
if notifications > 0: utils.showNotification(utils.getString(32401), name)
count_update += 1
# update the xbmc-db-mirror-variable
if modus == 'movie':
self.watchedmovielist_xbmc[i][3] = lastplayed_new
self.watchedmovielist_xbmc[i][4] = playcount_wl
else:
self.watchedepisodelist_xbmc[i][3] = lastplayed_new
self.watchedepisodelist_xbmc[i][4] = playcount_wl
else:
utils.log(u'write_xbmc_wdata: error updating xbmc database. %s. json_response=%s' % (name, str(json_response)), xbmc.LOGERROR)
else:
# the movie is in the watched-list but not in the xbmc-list -> no action
# utils.log(u'write_xbmc_wdata: movie not in xbmc database: tt%d, %s' % (imdbId, row_xbmc[2]), xbmc.LOGDEBUG)
continue
except:
utils.log(u"write_xbmc_wdata: Error while updating %s %s: %s" % (modus, name, sys.exc_info()[2]), xbmc.LOGERROR)
if progressdialogue: DIALOG_PROGRESS.close()
buggalo.addExtraData('count_update', count_update);
buggalo.onExceptionRaised()
return 1
if progressdialogue: DIALOG_PROGRESS.close()
if notifications > 1:
if modus == 'movie': strno = [32204, 32302]
else: strno = [32205, 32303];
utils.showNotification(utils.getString(strno[0]), utils.getString(strno[1])%(count_update))
return 0
def database_copy(self):
"""create a copy of the database, in case something goes wrong (only if database file is used)
Returns:
return code:
0 successfully copied database
1 file writing error
2 program exception
"""
if utils.getSetting("db_format") != '0':
return 0 # no backup needed since we are using mysql database
if utils.getSetting('dbbackup') == 'false':
return 0 # no backup requested in the addon settings
if not self.dbcopydone:
if not xbmcvfs.exists(self.dbpath):
utils.log(u'database_copy: directory %s does not exist. No backup possible.' % self.dbpath, xbmc.LOGERROR)
return 1
now = datetime.datetime.now()
timestr = u'%04d%02d%02d_%02d%02d%02d' % (now.year, now.month, now.day, now.hour, now.minute, now.second)
zipfilename = os.path.join(self.dbdirectory, utils.decode(timestr + u'-watchedlist.db.zip'))
zf = False
try:
zf = zipfile.ZipFile(zipfilename, 'w')
zf.write(self.dbpath, compress_type=zipfile.ZIP_DEFLATED)
zf.close()
self.dbcopydone = True
utils.log(u'database_copy: database backup copy created to %s' % zipfilename, xbmc.LOGINFO)
# copy the zip file with xbmc file system, if needed
if self.dbfileaccess == 'copy':
xbmcvfs.copy(zipfilename, os.path.join(self.dbdirectory_copy, utils.decode(timestr + u'-watchedlist.db.zip')))
xbmcvfs.delete(zipfilename)
return 0
except:
if zf:
zf.close()
buggalo.addExtraData('zipfilename', zipfilename);
buggalo.onExceptionRaised()
return 2
def watch_user_changes(self, idletime_old, idletime):
"""check if the user made changes in the watched states. Especially setting movies as "not watched".
This can not be recognized by the other functions
Args:
idletime_old: Old Idle Time
idletime: New Idle Time
"""
if xbmc.Player().isPlaying() == True:
return
if idletime > idletime_old:
# the idle time increased. No user interaction probably happened
return
utils.log(u'watch_user_changes: Check for user changes (no. %d)' % self.watch_user_changes_count, xbmc.LOGDEBUG)
self.watch_user_changes_count = self.watch_user_changes_count + 1
# save previous state
old_watchedmovielist_xbmc = self.watchedmovielist_xbmc
old_watchedepisodelist_xbmc = self.watchedepisodelist_xbmc
# get new state
self.get_watched_xbmc(1)
#save exception information
buggalo.addExtraData('len_old_watchedmovielist_xbmc', len(old_watchedmovielist_xbmc))
buggalo.addExtraData('len_old_watchedepisodelist_xbmc', len(old_watchedepisodelist_xbmc))
buggalo.addExtraData('len_self_watchedmovielist_xbmc', len(self.watchedmovielist_xbmc))
buggalo.addExtraData('len_self_watchedepisodelist_xbmc', len(self.watchedepisodelist_xbmc))
# separate the change detection and the change in the database to prevent circle reference
indices_changed = list([])
# compare states of movies/episodes
for modus in ['movie', 'episode']:
buggalo.addExtraData('modus', modus);
if modus == 'movie' and utils.getSetting("w_movies") != 'true':
continue
if modus == 'episode' and utils.getSetting("w_episodes") != 'true':
continue
if modus == 'movie':
list_new = self.watchedmovielist_xbmc
list_old = old_watchedmovielist_xbmc
else:
list_new = self.watchedepisodelist_xbmc
list_old = old_watchedepisodelist_xbmc
if len(list_old) == 0 or len(list_new) == 0:
                # one of the lists is empty: nothing to compare. No user changes noticeable
continue
for i_n, row_xbmc in enumerate(list_new):
if xbmc.abortRequested: return
mediaid = row_xbmc[7]
lastplayed_new = row_xbmc[3]
playcount_new = row_xbmc[4]
# index of this movie/episode in the old database (before the change by the user)
if (len(list_old) > i_n) and (list_old[i_n][7] == mediaid): i_o = i_n # db did not change
else: # search the movieid
i_o = [i for i, x in enumerate(list_old) if x[7] == mediaid]
if len(i_o) == 0: continue #movie is not in old array
i_o = i_o[0] # convert list to int
lastplayed_old = list_old[i_o][3]
playcount_old = list_old[i_o][4]
if playcount_new != playcount_old or lastplayed_new != lastplayed_old:
if playcount_new == playcount_old and playcount_new == 0:
                        continue # do not add lastplayed to database, when playcount = 0
# The user changed the playcount or lastplayed.
# update wl with new watched state
indices_changed.append([i_n, i_o, row_xbmc])
# go through all movies changed by the user
for icx in indices_changed:
if xbmc.abortRequested: return 1
                i_n, i_o, row_xbmc = icx
                lastplayed_old = list_old[i_o][3]
                playcount_old = list_old[i_o][4]
                lastplayed_new = row_xbmc[3]
                playcount_new = row_xbmc[4]
                mediaid = row_xbmc[7]
utils.log(u'watch_user_changes: %s "%s" changed playcount {%d -> %d} lastplayed {"%s" -> "%s"}. %sid=%d' % (modus, row_xbmc[5], playcount_old, playcount_new, utils.TimeStamptosqlDateTime(lastplayed_old), utils.TimeStamptosqlDateTime(lastplayed_new), modus, mediaid))
try:
self.wl_update_media(modus, row_xbmc, 1, 1)
except sqlite3.Error as e:
try:
errstring = e.args[0] # TODO: Find out, why this does not work some times
except:
errstring = ''
utils.log(u'write_wl_wdata: SQLite Database error (%s) while updating %s %s' % (errstring, modus, row_xbmc[5]))
if utils.getSetting("debug") == 'true':
utils.showNotification(utils.getString(32102), utils.getString(32606) % ('(%s)' % errstring))
# error because of db locked or similar error
self.close_db()
break
except mysql.connector.Error as err:
# Catch common mysql errors and show them to guide the user
utils.log(u'write_wl_wdata: MySQL Database error (%s) while updating %s %s' % (err, modus, row_xbmc[5]))
if utils.getSetting("debug") == 'true':
utils.showNotification(utils.getString(32102), utils.getString(32606) % ('(%s)' % err))
self.close_db()
break
# update xbmc watched status, e.g. to set duplicate movies also as watched
if len(indices_changed) > 0:
self.write_xbmc_wdata(0, 1) # this changes self.watchedmovielist_xbmc
self.close_db() # keep the db closed most of the time (no access problems)
def wl_update_media(self, mediatype, row_xbmc, saveanyway, commit):
"""update the wl database for one movie/episode with the information in row_xbmc.
Args:
mediatype: 'episode' or 'movie'
row_xbmc: One row of the xbmc media table self.watchedmovielist_xbmc.
saveanyway: Skip checks whether not to save the changes
commit: The db change is committed directly (slow with many movies, but safe)
Returns:
return code:
2 error loading database
count_return:
list with 2 entries: ???
"""
buggalo.addExtraData('self_sqlcursor', self.sqlcursor); buggalo.addExtraData('self_sqlcon', self.sqlcon);
buggalo.addExtraData('len_self_watchedmovielist_wl', len(self.watchedmovielist_wl))
buggalo.addExtraData('len_self_watchedepisodelist_wl', len(self.watchedepisodelist_wl))
buggalo.addExtraData('len_self_tvshownames', len(self.tvshownames))
buggalo.addExtraData('row_xbmc', row_xbmc)
buggalo.addExtraData('saveanyway', saveanyway)
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return 2
for modus in [mediatype]:
buggalo.addExtraData('modus', modus)
# row_xbmc: 0imdbnumber, 1empty, 2empty, 3lastPlayed, 4playCount, 5title, 6empty, 7movieid
imdbId = row_xbmc[0]
lastplayed_xbmc = row_xbmc[3]
playcount_xbmc = row_xbmc[4]
name = row_xbmc[5]
if modus == 'episode':
season = row_xbmc[1]
episode = row_xbmc[2]
count_return = list([0, 0])
self.database_copy()
if self.sqlcursor == 0 or self.sqlcon == 0:
if self.load_db():
return count_return
if not saveanyway and playcount_xbmc == 0:
# playcount in xbmc-list is empty. Nothing to save
if utils.getSetting("debug") == 'true':
# utils.log(u'wl_update_%s: not watched in xbmc: tt%d, %s' % (modus, imdbId, name), xbmc.LOGDEBUG)
pass
return count_return
if modus == 'movie':
j = [ii for ii, x in enumerate(self.watchedmovielist_wl) if x[0] == imdbId]
if modus == 'episode':
j = [ii for ii, x in enumerate(self.watchedepisodelist_wl) if x[0] == imdbId and x[1] == season and x[2] == episode]
if len(j) > 0:
j = j[0] # there can only be one valid index j, since only one entry in wl per imdbId
# the movie is already in the watched-list
if modus == 'movie':
row_wl = self.watchedmovielist_wl[j]
else:
row_wl = self.watchedepisodelist_wl[j]
lastplayed_wl = row_wl[3]
playcount_wl = row_wl[4]
lastchange_wl = row_wl[6]
if not saveanyway:
# compare playcount and lastplayed
# check if an update of the wl database is necessary (xbmc watched status newer)
if lastchange_wl > lastplayed_xbmc:
return count_return# no update of WL-db. Return
if playcount_wl >= playcount_xbmc and lastplayed_wl >= lastplayed_xbmc:
if utils.getSetting("debug") == 'true':
# utils.log(u'wl_update_movie: wl database up-to-date for movie tt%d, %s' % (imdbId, moviename), xbmc.LOGDEBUG)
pass
return count_return
# check if the lastplayed-timestamp in xbmc is useful
if lastplayed_xbmc == 0:
lastplayed_new = lastplayed_wl
else:
lastplayed_new = lastplayed_xbmc
else:
lastplayed_new = lastplayed_xbmc
lastchange_new = int(time.time())
if modus == 'movie':
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'UPDATE movie_watched SET playCount = ?, lastplayed = ?, lastChange = ? WHERE idMovieImdb LIKE ?'
else: # mysql
sql = 'UPDATE movie_watched SET playCount = %s, lastplayed = %s, lastChange = FROM_UNIXTIME(%s) WHERE idMovieImdb LIKE %s'
values = list([playcount_xbmc, lastplayed_new, lastchange_new, imdbId])
else:
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'UPDATE episode_watched SET playCount = ?, lastPlayed = ?, lastChange = ? WHERE idShow LIKE ? AND season LIKE ? AND episode LIKE ?'
else: # mysql
sql = 'UPDATE episode_watched SET playCount = %s, lastPlayed = FROM_UNIXTIME(%s), lastChange = FROM_UNIXTIME(%s) WHERE idShow LIKE %s AND season LIKE %s AND episode LIKE %s'
values = list([playcount_xbmc, lastplayed_new, lastchange_new, imdbId, season, episode])
self.sqlcursor.execute(sql, values)
count_return[1] = 1
# update the local mirror variable of the wl database: # 0imdbnumber, season, episode, 3lastPlayed, 4playCount, 5title, 6lastChange
if modus == 'movie':
self.watchedmovielist_wl[j] = list([imdbId, 0, 0, lastplayed_new, playcount_xbmc, name, lastchange_new])
else:
self.watchedepisodelist_wl[j] = list([imdbId, season, episode, lastplayed_new, playcount_xbmc, name, lastchange_new])
if utils.getSetting("debug") == 'true':
utils.log(u'wl_update_%s: updated wl db for "%s" (tt%d). playcount: {%d -> %d}. lastplayed: {"%s" -> "%s"}. lastchange: "%s"' % (modus, name, imdbId, playcount_wl, playcount_xbmc, utils.TimeStamptosqlDateTime(lastplayed_wl), utils.TimeStamptosqlDateTime(lastplayed_new), utils.TimeStamptosqlDateTime(lastchange_new)))
if playcount_xbmc > 0:
utils.showNotification(utils.getString(32403), name)
else:
utils.showNotification(utils.getString(32405), name)
else:
# the movie is not in the watched-list -> insert the movie
# order: idMovieImdb,playCount,lastChange,lastPlayed,title
lastchange_new = int(time.time())
if modus == 'movie':
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'INSERT INTO movie_watched (idMovieImdb,playCount,lastChange,lastPlayed,title) VALUES (?, ?, ?, ?, ?)'
else: # mysql
sql = 'INSERT INTO movie_watched (idMovieImdb,playCount,lastChange,lastPlayed,title) VALUES (%s, %s, FROM_UNIXTIME(%s), FROM_UNIXTIME(%s), %s)'
values = list([imdbId, playcount_xbmc, lastchange_new, lastplayed_xbmc, name])
else:
if utils.getSetting("db_format") == '0': # sqlite3
sql = 'INSERT INTO episode_watched (idShow,season,episode,playCount,lastChange,lastPlayed) VALUES (?, ?, ?, ?, ?, ?)'
else: # mysql
sql = 'INSERT INTO episode_watched (idShow,season,episode,playCount,lastChange,lastPlayed) VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s), FROM_UNIXTIME(%s))'
values = list([imdbId, season, episode, playcount_xbmc, lastchange_new, lastplayed_xbmc])
self.sqlcursor.execute(sql, values)
utils.log(u'wl_update_%s: new entry for wl database: "%s", lastChange="%s", lastPlayed="%s", playCount=%d' % (modus, name, utils.TimeStamptosqlDateTime(lastchange_new), utils.TimeStamptosqlDateTime(lastplayed_xbmc), playcount_xbmc))
count_return[0] = 1
# update the local mirror variable of the wl database
if modus == 'movie':
self.watchedmovielist_wl.append(list([imdbId, 0, 0, lastplayed_xbmc, playcount_xbmc, name, lastchange_new]))
else:
self.watchedepisodelist_wl.append(list([imdbId, season, episode, lastplayed_xbmc, playcount_xbmc, name, lastchange_new]))
if utils.getSetting("debug") == 'true':
if playcount_xbmc > 0:
utils.showNotification(utils.getString(32402), name)
else:
utils.showNotification(utils.getString(32405), name)
if commit:
self.sqlcon.commit()
return count_return
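# Illustrative row_xbmc entry for a movie (hypothetical values), following the
# field order documented above in wl_update_media:
# 0 imdbnumber, 1 unused, 2 unused, 3 lastPlayed, 4 playCount, 5 title, 6 unused, 7 movieid
#
#   row_xbmc_example = [1375666, 0, 0, 1496203200, 1, u'Example Movie', '', 42]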
|
gpl-3.0
| -5,481,094,405,210,845,000
| 53.060086
| 337
| 0.529812
| false
| 4.291292
| false
| false
| false
|
wadobo/timebank
|
tinymce/views.py
|
1
|
4408
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
import json
import logging
from django.core import urlresolvers
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from tinymce.compressor import gzip_compressor
from tinymce.widgets import get_language_config
def textareas_js(request, name, lang=None):
"""
    Returns a HttpResponse whose content is a JavaScript file. The template
is loaded from 'tinymce/<name>_textareas.js' or
'<name>/tinymce_textareas.js'. Optionally, the lang argument sets the
content language.
"""
template_files = (
'tinymce/%s_textareas.js' % name,
'%s/tinymce_textareas.js' % name,
)
template = loader.select_template(template_files)
vars = get_language_config(lang)
vars['content_language'] = lang
context = RequestContext(request, vars)
return HttpResponse(template.render(context),
content_type="application/x-javascript")
def spell_check(request):
"""
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
"""
try:
import enchant
raw = request.raw_post_data
input = json.loads(raw)
id = input['id']
method = input['method']
params = input['params']
lang = params[0]
arg = params[1]
if not enchant.dict_exists(str(lang)):
raise RuntimeError("dictionary not found for language '%s'" % lang)
checker = enchant.Dict(str(lang))
if method == 'checkWords':
result = [word for word in arg if not checker.check(word)]
elif method == 'getSuggestions':
result = checker.suggest(arg)
else:
raise RuntimeError("Unkown spellcheck method: '%s'" % method)
output = {
'id': id,
'result': result,
'error': None,
}
except Exception:
logging.exception("Error running spellchecker")
return HttpResponse(_("Error running spellchecker"))
return HttpResponse(json.dumps(output),
content_type='application/json')
def preview(request, name):
"""
Returns a HttpResponse whose content is an HTML file that is used
by the TinyMCE preview plugin. The template is loaded from
'tinymce/<name>_preview.html' or '<name>/tinymce_preview.html'.
"""
template_files = (
'tinymce/%s_preview.html' % name,
'%s/tinymce_preview.html' % name,
)
template = loader.select_template(template_files)
return HttpResponse(template.render(RequestContext(request)),
content_type="text/html")
def flatpages_link_list(request):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
list of links to flatpages.
"""
from django.contrib.flatpages.models import FlatPage
link_list = [(page.title, page.url) for page in FlatPage.objects.all()]
return render_to_link_list(link_list)
def compressor(request):
"""
Returns a GZip-compressed response.
"""
return gzip_compressor(request)
def render_to_link_list(link_list):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
    list of links suitable for use with the TinyMCE external_link_list_url
configuration option. The link_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCELinkList', link_list)
def render_to_image_list(image_list):
"""
    Returns a HttpResponse whose content is a JavaScript file representing a
    list of images suitable for use with the TinyMCE external_image_list_url
configuration option. The image_list parameter must be a list of 2-tuples.
"""
return render_to_js_vardef('tinyMCEImageList', image_list)
def render_to_js_vardef(var_name, var_value):
output = "var %s = %s" % (var_name, json.dumps(var_value))
return HttpResponse(output, content_type='application/x-javascript')
def filebrowser(request):
fb_url = "%s://%s%s" % (request.is_secure() and 'https' or 'http',
request.get_host(), urlresolvers.reverse('fb_browse'))
return render_to_response('tinymce/filebrowser.js', {'fb_url': fb_url},
context_instance=RequestContext(request))
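# Hypothetical example (not part of the original module): a view that feeds
# render_to_link_list with (title, url) 2-tuples; "myapp.models.Article" is an
# assumed model used purely for illustration.
def article_link_list(request):
    from myapp.models import Article  # assumed model, illustration only
    links = [(article.title, article.get_absolute_url())
             for article in Article.objects.all()]
    return render_to_link_list(links)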
|
agpl-3.0
| 2,562,349,873,354,733,000
| 33.170543
| 79
| 0.667877
| false
| 3.890556
| false
| false
| false
|
markokr/sysca
|
tests/helpers.py
|
1
|
1213
|
import binascii
import os.path
from sysca import api as sysca
_FDIR = os.path.join(os.path.dirname(__file__), "files")
def demo_fn(basename):
return os.path.join(_FDIR, basename)
def demo_data(basename, mode="rb"):
if "b" in mode:
with open(demo_fn(basename), mode) as f:
return f.read().replace(b"\r\n", b"\n")
with open(demo_fn(basename), mode, encoding="utf8") as f:
return f.read().replace("\r\n", "\n")
def demo_raw(basename):
return depem(demo_data(basename))
def depem(data):
if isinstance(data, str):
data = data.encode("ascii")
p1 = data.find(b"-----\n") + 6
p2 = data.find(b"\n-----", p1)
return binascii.a2b_base64(data[p1:p2])
def new_root(ktype="ec", **kwargs):
ca_key = sysca.new_key(ktype)
ca_info = sysca.CertInfo(ca=True, load=ca_key, **kwargs)
ca_cert = sysca.create_x509_cert(ca_key, ca_key.public_key(), ca_info, ca_info, 365)
return ca_key, ca_cert
def new_cert(ca_key, ca_info, ktype="ec", **kwargs):
key = sysca.new_key(ktype)
info = sysca.CertInfo(load=key.public_key(), **kwargs)
cert = sysca.create_x509_cert(ca_key, key.public_key(), info, ca_info, 365)
return key, cert
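# Hypothetical usage sketch (not part of the original helpers). The "subject"
# keyword forwarded through **kwargs is an assumption about sysca.CertInfo and
# is shown purely for illustration.
def demo_chain():
    ca_key, ca_cert = new_root(subject={"CN": "Test Root CA"})
    ca_info = sysca.CertInfo(ca=True, load=ca_key)
    leaf_key, leaf_cert = new_cert(ca_key, ca_info, subject={"CN": "server.test"})
    return ca_cert, leaf_cert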
|
isc
| -8,735,806,898,496,008,000
| 25.955556
| 88
| 0.619126
| false
| 2.750567
| false
| false
| false
|
onitake/Uranium
|
examples/definition_viewer/main.py
|
1
|
2450
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import sys
import os.path
import signal
import traceback
from PyQt5.QtCore import QObject, QUrl, pyqtSlot, pyqtProperty, pyqtSignal
from PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType
from PyQt5.QtWidgets import QApplication
import UM.Resources
import UM.Settings
import DefinitionTreeModel
class DefinitionLoader(QObject):
def __init__(self, parent = None):
super().__init__(parent)
self._metadata = {}
self._definition_id = ""
@pyqtSlot("QUrl", result = str)
def load(self, file_path):
try:
definition = UM.Settings.DefinitionContainer(file_path.fileName())
dirname = os.path.dirname(file_path.toLocalFile())
UM.Resources.Resources.addSearchPath(dirname)
UM.Resources.Resources.addSearchPath(os.path.realpath(os.path.join(dirname, "..")))
with open(file_path.toLocalFile()) as data:
definition.deserialize(data.read())
self._metadata = dict(definition.metaData)
self.metaDataChanged.emit()
UM.Settings.ContainerRegistry.getInstance().addContainer(definition)
self._definition_id = definition.id
self.loaded.emit()
except Exception as e:
error_text = "An exception occurred loading file {0}:\n".format(file_path)
error_text += str(e)
error_text += traceback.format_exc()
self.error.emit(error_text)
loaded = pyqtSignal()
error = pyqtSignal(str, arguments=["errorText"])
metaDataChanged = pyqtSignal()
@pyqtProperty("QVariantMap", notify=metaDataChanged)
def metaData(self):
return self._metadata
@pyqtProperty(str, notify=loaded)
def definitionId(self):
return self._definition_id
signal.signal(signal.SIGINT, signal.SIG_DFL)
file_name = None
if len(sys.argv) > 1:
file_name = sys.argv[1]
del sys.argv[1]
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
qmlRegisterType(DefinitionLoader, "Example", 1, 0, "DefinitionLoader")
qmlRegisterType(DefinitionTreeModel.DefinitionTreeModel, "Example", 1, 0, "DefinitionTreeModel")
if file_name:
engine.rootContext().setContextProperty("open_file", QUrl.fromLocalFile(file_name))
engine.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml"))
app.exec_()
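# Hypothetical invocation (illustration only): pass a definition file on the
# command line so it is exposed to QML through the "open_file" context property:
#   python3 main.py /path/to/printer_definition.def.json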
|
agpl-3.0
| 3,755,217,824,218,569,000
| 30.410256
| 96
| 0.677959
| false
| 3.712121
| false
| false
| false
|
yeon3683/handpose
|
util/handdetector.py
|
1
|
18839
|
"""Provides a basic hand detector in depth images.
HandDetector provides interface for detecting hands in depth image, by using the center of mass.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import os
import cv2
from scipy import stats, ndimage
class HandDetector(object):
"""
Detect hand based on simple heuristic, centered at Center of Mass
"""
RESIZE_BILINEAR = 0
RESIZE_CV2_NN = 1
RESIZE_CV2_LINEAR = 2
def __init__(self, dpt, fx, fy, importer=None):
"""
Constructor
:param dpt: depth image
        :param fx: camera focal length
        :param fy: camera focal length
"""
self.dpt = dpt
self.maxDepth = min(1500, dpt.max())
self.minDepth = max(10, dpt.min())
# set values out of range to 0
self.dpt[self.dpt > self.maxDepth] = 0.
self.dpt[self.dpt < self.minDepth] = 0.
# camera settings
self.fx = fx
self.fy = fy
self.importer = importer
# depth resize method
self.resizeMethod = self.RESIZE_CV2_NN
def calculateCoM(self, dpt):
"""
Calculate the center of mass
:param dpt: depth image
:return: (x,y,z) center of mass
"""
dc = dpt.copy()
dc[dc < self.minDepth] = 0
dc[dc > self.maxDepth] = 0
cc = ndimage.measurements.center_of_mass(dc > 0)
num = numpy.count_nonzero(dc)
com = numpy.array((cc[1]*num, cc[0]*num, dc.sum()), numpy.float)
if num == 0:
return numpy.array((0, 0, 0), numpy.float)
else:
return com/num
def checkImage(self, tol):
"""
Check if there is some content in the image
:param tol: tolerance
        :return: True if the image has content, otherwise False
"""
# print numpy.std(self.dpt)
if numpy.std(self.dpt) < tol:
return False
else:
return True
def getNDValue(self):
"""
        Get the value used for undefined depth distances
        :return: value representing undefined depth
"""
if self.dpt[self.dpt < self.minDepth].shape[0] > self.dpt[self.dpt > self.maxDepth].shape[0]:
return stats.mode(self.dpt[self.dpt < self.minDepth])[0][0]
else:
return stats.mode(self.dpt[self.dpt > self.maxDepth])[0][0]
@staticmethod
def bilinearResize(src, dsize, ndValue):
"""
        Bilinear resizing that spares out undefined parts of the depth map
        :param src: source depth map
        :param dsize: new size of resized depth map
        :param ndValue: value of undefined depth
        :return: resized depth map
"""
dst = numpy.zeros((dsize[1], dsize[0]), dtype=numpy.float32)
x_ratio = float(src.shape[1] - 1) / dst.shape[1]
y_ratio = float(src.shape[0] - 1) / dst.shape[0]
for row in range(dst.shape[0]):
y = int(row * y_ratio)
y_diff = (row * y_ratio) - y # distance of the nearest pixel(y axis)
y_diff_2 = 1 - y_diff
for col in range(dst.shape[1]):
x = int(col * x_ratio)
x_diff = (col * x_ratio) - x # distance of the nearest pixel(x axis)
x_diff_2 = 1 - x_diff
y2_cross_x2 = y_diff_2 * x_diff_2
y2_cross_x = y_diff_2 * x_diff
y_cross_x2 = y_diff * x_diff_2
y_cross_x = y_diff * x_diff
# mathematically impossible, but just to be sure...
if(x+1 >= src.shape[1]) | (y+1 >= src.shape[0]):
raise UserWarning("Shape mismatch")
# set value to ND if there are more than two values ND
numND = int(src[y, x] == ndValue) + int(src[y, x + 1] == ndValue) + int(src[y + 1, x] == ndValue) + int(
src[y + 1, x + 1] == ndValue)
if numND > 2:
dst[row, col] = ndValue
continue
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
# interpolate only over known values, switch to linear interpolation
if src[y, x] == ndValue:
y2_cross_x2 = 0.
y2_cross_x = 1. - y_cross_x - y_cross_x2
if src[y, x + 1] == ndValue:
y2_cross_x = 0.
if y2_cross_x2 != 0.:
y2_cross_x2 = 1. - y_cross_x - y_cross_x2
if src[y + 1, x] == ndValue:
y_cross_x2 = 0.
y_cross_x = 1. - y2_cross_x - y2_cross_x2
if src[y + 1, x + 1] == ndValue:
y_cross_x = 0.
if y_cross_x2 != 0.:
y_cross_x2 = 1. - y2_cross_x - y2_cross_x2
# print src[y, x], src[y, x+1],src[y+1, x],src[y+1, x+1]
# normalize weights
if not ((y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.)):
sc = 1. / (y_cross_x + y_cross_x2 + y2_cross_x + y2_cross_x2)
y2_cross_x2 *= sc
y2_cross_x *= sc
y_cross_x2 *= sc
y_cross_x *= sc
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
if (y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.):
dst[row, col] = ndValue
else:
dst[row, col] = y2_cross_x2 * src[y, x] + y2_cross_x * src[y, x + 1] + y_cross_x2 * src[
y + 1, x] + y_cross_x * src[y + 1, x + 1]
return dst
def comToBounds(self, com, size):
"""
Calculate boundaries, project to 3D, then add offset and backproject to 2D (ux, uy are canceled)
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:return: xstart, xend, ystart, yend, zstart, zend
"""
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
xstart = int(numpy.floor((com[0] * com[2] / self.fx - size[0] / 2.) / com[2]*self.fx))
xend = int(numpy.floor((com[0] * com[2] / self.fx + size[0] / 2.) / com[2]*self.fx))
ystart = int(numpy.floor((com[1] * com[2] / self.fy - size[1] / 2.) / com[2]*self.fy))
yend = int(numpy.floor((com[1] * com[2] / self.fy + size[1] / 2.) / com[2]*self.fy))
return xstart, xend, ystart, yend, zstart, zend
def getCrop(self, dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z=True):
"""
Crop patch from image
:param dpt: depth image to crop from
:param xstart: start x
:param xend: end x
:param ystart: start y
:param yend: end y
:param zstart: start z
:param zend: end z
:param thresh_z: threshold z values
:return: cropped image
"""
if len(dpt.shape) == 2:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1])].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1]))), mode='constant', constant_values=0)
elif len(dpt.shape) == 3:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1]), :].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1])),
(0, 0)), mode='constant', constant_values=0)
else:
raise NotImplementedError()
if thresh_z is True:
msk1 = numpy.bitwise_and(cropped < zstart, cropped != 0)
msk2 = numpy.bitwise_and(cropped > zend, cropped != 0)
cropped[msk1] = zstart
cropped[msk2] = 0. # backface is at 0, it is set later
return cropped
def resizeCrop(self, crop, sz):
"""
Resize cropped image
:param crop: crop
:param sz: size
:return: resized image
"""
if self.resizeMethod == self.RESIZE_CV2_NN:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_NEAREST)
elif self.resizeMethod == self.RESIZE_BILINEAR:
rz = self.bilinearResize(crop, sz, self.getNDValue())
elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_LINEAR)
else:
raise NotImplementedError("Unknown resize method!")
return rz
def applyCrop3D(self, dpt, com, size, dsize, thresh_z=True, background=None):
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z)
wb = (xend - xstart)
hb = (yend - ystart)
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# depth resize
rz = self.resizeCrop(cropped, sz)
if background is None:
background = self.getNDValue() # use background as filler
ret = numpy.ones(dsize, numpy.float32) * background
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
return ret
def cropArea3D(self, com=None, size=(250, 250, 250), dsize=(128, 128), docom=False):
"""
        Crop the hand area in a 3D volume; the scale varies inversely with the distance of the hand to the camera
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:param dsize: (x,y) extent of the destination size
:return: cropped hand image, transformation matrix for joints, CoM in image coordinates
"""
# print com, self.importer.jointImgTo3D(com)
# import matplotlib.pyplot as plt
# import matplotlib
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.imshow(self.dpt, cmap=matplotlib.cm.jet)
if len(size) != 3 or len(dsize) != 2:
raise ValueError("Size must be 3D and dsize 2D bounding box")
if com is None:
com = self.calculateCoM(self.dpt)
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='.')
#############
# for simulating COM within cube
if docom is True:
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
if numpy.isclose(com[2], 0):
com[2] = 300.
com[0] += xstart
com[1] += ystart
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='x')
wb = (xend - xstart)
hb = (yend - ystart)
trans = numpy.asmatrix(numpy.eye(3, dtype=float))
trans[0, 2] = -xstart
trans[1, 2] = -ystart
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# print com, sz, cropped.shape, xstart, xend, ystart, yend, hb, wb, zstart, zend
if cropped.shape[0] > cropped.shape[1]:
scale = numpy.asmatrix(numpy.eye(3, dtype=float) * sz[1] / float(cropped.shape[0]))
else:
scale = numpy.asmatrix(numpy.eye(3, dtype=float) * sz[0] / float(cropped.shape[1]))
scale[2, 2] = 1
# depth resize
rz = self.resizeCrop(cropped, sz)
# pylab.imshow(rz); pylab.gray();t=transformPoint2D(com,scale*trans);pylab.scatter(t[0],t[1]); pylab.show()
ret = numpy.ones(dsize, numpy.float32) * self.getNDValue() # use background as filler
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
# print rz.shape
off = numpy.asmatrix(numpy.eye(3, dtype=float))
off[0, 2] = xstart
off[1, 2] = ystart
# fig = plt.figure()
# ax = fig.add_subplot(131)
# ax.imshow(cropped, cmap=matplotlib.cm.jet)
# ax = fig.add_subplot(132)
# ax.imshow(rz, cmap=matplotlib.cm.jet)
# ax = fig.add_subplot(133)
# ax.imshow(ret, cmap=matplotlib.cm.jet)
# plt.show(block=False)
# print trans,scale,off,off*scale*trans
return ret, off * scale * trans, com
def checkPose(self, joints):
"""
Check if pose is anatomically possible
@see Serre: Kinematic model of the hand using computer vision
:param joints: joint locations R^16x3
:return: true if pose is possible
"""
# check dip, pip of fingers
return True
def detect(self, size=(250, 250, 250), doHandSize=True):
"""
Detect the hand as closest object to camera
:param size: bounding box size
:return: center of mass of hand
"""
steps = 20
dz = (self.maxDepth - self.minDepth)/float(steps)
for i in range(steps):
part = self.dpt.copy()
part[part < i*dz + self.minDepth] = 0
part[part > (i+1)*dz + self.minDepth] = 0
part[part != 0] = 10 # set to something
ret, thresh = cv2.threshold(part, 1, 255, cv2.THRESH_BINARY)
thresh = thresh.astype(dtype=numpy.uint8)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in range(len(contours)):
if cv2.contourArea(contours[c]) > 200:
# centroid
M = cv2.moments(contours[c])
cx = int(numpy.rint(M['m10']/M['m00']))
cy = int(numpy.rint(M['m01']/M['m00']))
# crop
xstart = int(max(cx-100, 0))
xend = int(min(cx+100, self.dpt.shape[1]-1))
ystart = int(max(cy-100, 0))
yend = int(min(cy+100, self.dpt.shape[0]-1))
cropped = self.dpt[ystart:yend, xstart:xend].copy()
cropped[cropped < i*dz + self.minDepth] = 0.
cropped[cropped > (i+1)*dz + self.minDepth] = 0.
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
com[0] += xstart
com[1] += ystart
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
if doHandSize is True:
# refined contour for size estimation
part_ref = self.dpt.copy()
part_ref[part_ref < zstart] = 0
part_ref[part_ref > zend] = 0
part_ref[part_ref != 0] = 10 # set to something
ret, thresh_ref = cv2.threshold(part_ref, 1, 255, cv2.THRESH_BINARY)
contours_ref, _ = cv2.findContours(thresh_ref.astype(dtype=numpy.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find the largest contour
areas = [cv2.contourArea(cc) for cc in contours_ref]
c_max = numpy.argmax(areas)
# final result
return com, self.estimateHandsize(contours_ref[c_max], com, size)
else:
return com, size
# no appropriate hand detected
return numpy.array((0, 0, 0), numpy.float), size
def estimateHandsize(self, contours, com, cube=(250, 250, 250), tol=0):
"""
Estimate hand size from depth image
:param contours: contours of hand
:param com: center of mass
:param cube: default cube
:param tol: tolerance to be added to all sides
:return: metric cube for cropping (x, y, z)
"""
x, y, w, h = cv2.boundingRect(contours)
# drawing = numpy.zeros((480, 640), dtype=float)
# cv2.drawContours(drawing, [contours], 0, (255, 0, 244), 1, 8)
# cv2.rectangle(drawing, (x, y), (x+w, y+h), (244, 0, 233), 2, 8, 0)
# cv2.imshow("contour", drawing)
# convert to cube
xstart = (com[0] - w / 2.) * com[2] / self.fx
xend = (com[0] + w / 2.) * com[2] / self.fx
ystart = (com[1] - h / 2.) * com[2] / self.fy
yend = (com[1] + h / 2.) * com[2] / self.fy
szx = xend - xstart
szy = yend - ystart
sz = (szx + szy) / 2.
cube = (sz + tol, sz + tol, sz + tol)
return cube
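# Hypothetical usage sketch (not part of the original module). The depth image
# and the focal lengths fx/fy are assumptions made purely for illustration.
#   hd = HandDetector(depth_image, fx=588.03, fy=587.07)
#   com, cube = hd.detect(size=(250, 250, 250))
#   crop, trans, com = hd.cropArea3D(com=com, size=cube, dsize=(128, 128))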
|
gpl-3.0
| 4,676,184,767,098,701,000
| 39.86551
| 136
| 0.524019
| false
| 3.330799
| false
| false
| false
|
ytsarev/rally
|
rally/deploy/serverprovider/providers/existing.py
|
1
|
2084
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.deploy.serverprovider import provider
class ExistingServers(provider.ProviderFactory):
"""Just return endpoints from own configuration."""
CREDENTIALS_SCHEMA = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'user': {'type': 'string'},
'key': {'type': 'string'},
'password': {'type': 'string'}
},
'required': ['host', 'user']
}
CONFIG_SCHEMA = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'credentials': {
'type': 'array',
'items': CREDENTIALS_SCHEMA
},
},
'required': ['credentials']
}
def __init__(self, deployment, config):
super(ExistingServers, self).__init__(deployment, config)
self.credentials = config['credentials']
def create_servers(self):
servers = []
for endpoint in self.credentials:
servers.append(provider.Server(host=endpoint['host'],
user=endpoint['user'],
key=endpoint.get('key'),
password=endpoint.get('password'),
port=endpoint.get('port', 22)))
return servers
def destroy_servers(self):
pass
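# Illustrative deployment config accepted by ExistingServers; host names, users
# and paths are hypothetical and only demonstrate CONFIG_SCHEMA:
# {
#     "name": "ExistingServers",
#     "credentials": [
#         {"host": "10.0.0.2", "user": "root", "key": "/root/.ssh/id_rsa"},
#         {"host": "10.0.0.3", "user": "root", "password": "secret", "port": 22}
#     ]
# }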
|
apache-2.0
| -183,121,135,223,786,270
| 32.612903
| 78
| 0.545106
| false
| 4.58022
| false
| false
| false
|
odicraig/kodi2odi
|
addons/plugin.video.teevee/default.py
|
1
|
19931
|
from __future__ import unicode_literals
from resources.lib.modules.addon import Addon
import sys,os,re
import urlparse,urllib
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
from resources.lib.modules import control,client,teevee2,metadata,cache
from resources.lib.modules.log_utils import log
meta_enabled = control.setting('tv_metadata') == 'true'
paginated = control.setting('limit_shows') == 'true'
offset = int(control.setting('results_number'))
base = 'http://opentuner.is/'
addon = Addon('plugin.video.teevee', sys.argv)
addon_handle = int(sys.argv[1])
if not os.path.exists(control.dataPath):
os.mkdir(control.dataPath)
AddonPath = addon.get_path()
themes=['new','simple']
theme = themes[int(control.setting('theme'))]
IconPath = os.path.join(AddonPath , "resources/media/%s"%theme)
def icon_path(filename):
if 'http://' in filename:
return filename
return os.path.join(IconPath, filename)
fanart = icon_path('fanart.jpg')
args = urlparse.parse_qs(sys.argv[2][1:])
mode = args.get('mode', None)
if mode is None:
addon.add_item({'mode': 'favourites'}, {'title':control.lang(30100).encode('utf-8')}, img=icon_path('Favourites.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'new_episodes', 'page':'1'}, {'title':control.lang(30101).encode('utf-8'), 'page':'1'}, img=icon_path('Latest_added.png'), fanart=fanart,is_folder=True)
if control.setting('enable_calendar')=='true':
addon.add_item({'mode': 'calendar'}, {'title':control.lang(30102).encode('utf-8')}, img=icon_path('Calendar.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/latest-added/', 'page':'1'}, {'title':control.lang(30103).encode('utf-8')}, img=icon_path('Latest_added.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/popular-today/', 'page':'1'}, {'title':control.lang(30104).encode('utf-8')}, img=icon_path('Popular.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'open_shows', 'url':'/most-popular/', 'page':'1'}, {'title':control.lang(30105).encode('utf-8')}, img=icon_path('Popular.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'alphabet'}, {'title':control.lang(30106).encode('utf-8')}, img=icon_path('AZ.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'genres'}, {'title':control.lang(30107).encode('utf-8')}, img=icon_path('Genre.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'downloader'}, {'title':control.lang(30108).encode('utf-8')}, img=icon_path('Downloads.png'), fanart=fanart,is_folder=True)
addon.add_item({'mode': 'search'}, {'title':control.lang(30109).encode('utf-8')}, img=icon_path('Search.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
from resources.lib.modules import cache, changelog
cache.get(changelog.get, 600000000, control.addonInfo('version'), table='changelog')
elif mode[0]=='favourites':
from resources.lib.modules import favourites
favs = favourites.get_favourites()
total=len(favs)
for fav in favs:
title,url,year = fav
url = base + url
meta = metadata.get_show_meta(title,url,year=year)
context = cache.get(teevee2.get_tv_context,10000,title,url,year,True)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'], total_items=total,contextmenu_items=context,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_shows':
url = url_sh = args['url'][0]
page = int(args['page'][0])
try: sort = args['sort'][0] == 'true'
except: sort = False
shows = cache.get(teevee2.get_shows,24,url)
if sort:
shows.sort(key=lambda x: x[1])
last = False
if paginated and meta_enabled:
if len(shows)<=offset:
last=True
pass
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(shows):
last = True
end = len(shows) - 1
shows = shows[start:end]
total = len(shows)
for show in shows:
url,title,year = show
meta = metadata.get_show_meta(title,url,year=year)
context = teevee2.get_tv_context(title,url,year,False)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total,is_folder=True)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'open_shows', 'url':url_sh, 'page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='new_episodes':
page = int(args['page'][0])
episodes = cache.get(teevee2.get_new_episodes,24)
last = False
if paginated and meta_enabled:
if len(episodes)<=offset:
last=True
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(episodes):
last = True
end = len(episodes) - 1
episodes = episodes[start:end]
total = len(episodes)
for ep in episodes:
url,showtitle,season,episode = ep
meta = metadata.get_episode_meta(showtitle,season,episode,url,more=True)
context = teevee2.get_episode_context(showtitle,season,episode,url,meta['cover_url'])
addon.add_video_item({'mode':'play_episode','url':'url'},meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'new_episodes','page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='alphabet':
alphabet = teevee2.get_alphabet()
for al in alphabet:
addon.add_item({'mode': 'open_shows', 'url':al[0], 'page':'1','sort':'true'}, {'title':al[1]}, img=icon_path('AZ.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='genres':
alphabet = teevee2.get_genres()
for al in alphabet:
addon.add_item({'mode': 'open_shows', 'url':al[0], 'page':'1'}, {'title':al[1]}, img=icon_path('Genre.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_show':
url = args['url'][0]
show = args['title'][0]
imdb,seasons = teevee2.get_seasons(url)
meta = metadata.get_season_meta(show,len(seasons),imdb)
i = 0
for s in seasons:
addon.add_item({'mode': 'open_season', 'url':s[0], 'num':'%s'%(i+1)}, {'title':'%s %s'%(control.lang(30170).encode('utf-8'),s[1])}, img=meta[i]['cover_url'], fanart=meta[i]['backdrop_url'],is_folder=True)
i += 1
addon.end_of_directory()
elif mode[0]=='open_season':
url = args['url'][0]
num = args['num'][0]
imdb,showtitle,episodes = teevee2.get_episodes(url,num)
total = len(episodes)
for ep in episodes:
url,episode,episode_title = ep
meta = metadata.get_episode_meta(showtitle,num,episode,url,ep_title=episode_title)
if episode_title not in meta['title']:
meta['title'] = '%sx%s %s'%(num,episode,episode_title)
context = teevee2.get_episode_context(showtitle,num,episode,url,meta['cover_url'])
addon.add_video_item({'mode':'play_episode','url':url},meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total)
addon.end_of_directory()
elif mode[0]=='calendar':
days = teevee2.get_month()
for day in days:
d=day[1]
m=day[2]
y=day[3]
mnth=day[4]
name=day[0]+', %s %s '%(d,mnth)
addon.add_item({'mode': 'open_day', 'day':d, 'month':m, 'year':y,'page':'1'},{'title': name}, img=icon_path('Calendar.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_day':
day = args['day'][0]
month = args['month'][0]
year = args['year'][0]
page = int(args['page'][0])
episodes = cache.get(teevee2.get_episodes_calendar,100,day,month,year)
last = False
if paginated and meta_enabled:
if len(episodes)<=offset:
last = True
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(episodes):
last = True
end = len(episodes) - 1
episodes = episodes[start:end]
total = len(episodes)
for ep in episodes:
url,season,episode,showtitle,year = ep
meta = metadata.get_episode_meta(showtitle,season,episode,url,more=True)
context = teevee2.get_episode_context(showtitle,season,episode,url,meta['cover_url'])
addon.add_video_item({'mode':'play_episode','url':url},meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'new_episodes','page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='play_episode':
url = args['url'][0]
links,sl = teevee2.get_sources(url)
if control.setting('autoplay')!='true':
i = control.selectDialog(sl)
if i>-1:
try:
url = links[i]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
import urlresolver
resolved = urlresolver.resolve(url)
if control.setting('use_TM')=='true':
try:
from dudehere.routines.transmogrifier import TransmogrifierAPI
TM = TransmogrifierAPI()
resolved = TM.get_streaming_url(resolved)
except:
pass
addon.resolve_url(resolved)
except:
control.infoDialog(control.lang(30168).encode('utf-8'))
else:
index = 0
import urlresolver
done = False
checked = 0
while not done:
url = links[index%len(links)]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
try:
checked +=1
import urlresolver
resolved=urlresolver.resolve(url)
except:
index +=1
continue
if not resolved:
index +=1
continue
else:
break
if checked>=len(links):
resolved = False
break
if resolved:
if control.setting('use_TM')=='true':
try:
from dudehere.routines.transmogrifier import TransmogrifierAPI
TM = TransmogrifierAPI()
resolved = TM.get_streaming_url(resolved)
except:
pass
addon.resolve_url(resolved)
elif mode[0] == 'downloader':
import resources.lib.modules.downloader as downloader
downloader.downloader()
elif mode[0] == 'addDownload':
name,url,image=args['name'][0],args['url'][0],args['thumb'][0]
import resources.lib.modules.downloader as downloader
downloader.addDownload(name,url,image)
elif mode[0] == 'removeDownload':
url=args['url'][0]
import resources.lib.modules.downloader as downloader
downloader.removeDownload(url)
elif mode[0] == 'startDownload':
import resources.lib.modules.downloader as downloader
downloader.startDownload()
elif mode[0] == 'startDownloadThread':
import resources.lib.modules.downloader as downloader
downloader.startDownloadThread()
elif mode[0] == 'stopDownload':
import resources.lib.modules.downloader as downloader
downloader.stopDownload()
elif mode[0] == 'statusDownload':
import resources.lib.modules.downloader as downloader
downloader.statusDownload()
elif mode[0]=='download':
url = args['url'][0]
title = args['title'][0]
image = args['thumb'][0]
tm = control.setting('dl_TM') == 'true'
try:
from dudehere.routines.transmogrifier import TransmogrifierAPI
TM = TransmogrifierAPI()
except:
tm = False
links,sl = teevee2.get_sources(url)
if control.setting('auto_download')!='true':
i = control.selectDialog(sl)
if i>-1:
try:
url = links[i]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
import urlresolver
resolved = urlresolver.resolve(url)
if tm:
resolved = resolved.split('|')[0]
ext = os.path.splitext(urlparse.urlparse(resolved).path)[1][1:].lower()
if ext == 'm3u8': raise Exception()
filename = title.replace(' ','_')
filename = re.sub('[^-a-zA-Z0-9_.() ]+', '', filename)
filename = filename.rstrip('.')
try:
season = re.findall('S(\d+)',title)[0]
episode = re.findall('E(\d+)',title)[0]
except:
season,episode = '',''
video = {
"type": 'tvshow',
"filename": filename + '.' + ext,
"url": resolved,
"season": season,
"episode": episode,
"addon": "plugin.video.teevee",
"save_dir": control.setting('download_folder')
}
response = TM.enqueue([video])
else:
import resources.lib.modules.downloader as downloader
downloader.addDownload(title,resolved,image,resolved=True)
except:
control.infoDialog(control.lang(30168).encode('utf-8'))
else:
resolved = False
index = 0
import urlresolver
done = False
checked = 0
while not done:
url = links[index%len(links)]
if 'iwatch' in url:
url = teevee2.resolve_iwatch(url)
try:
checked +=1
import urlresolver
resolved=urlresolver.resolve(url)
except:
index +=1
continue
if not resolved:
index +=1
continue
else:
break
if checked>=len(links):
resolved = False
break
if resolved:
if tm:
resolved = resolved.split('|')[0]
ext = os.path.splitext(urlparse.urlparse(resolved).path)[1][1:].lower()
if ext == 'm3u8': raise Exception()
filename = title.replace(' ','_')
filename = re.sub('[^-a-zA-Z0-9_.() ]+', '', filename)
filename = filename.rstrip('.')
try:
season = re.findall('S(\d+)',title)[0]
episode = re.findall('E(\d+)',title)[0]
except:
season,episode = '',''
video = {
"type": 'tvshow',
"filename": filename + '.' + ext,
"url": resolved,
"season": season,
"episode": episode,
"addon": "plugin.video.teevee",
"save_dir": control.setting('download_folder')
}
response = TM.enqueue([video])
else:
import resources.lib.modules.downloader as downloader
downloader.addDownload(title,resolved,image,resolved=True)
elif mode[0]=='add_tv_fav':
name = args['show'][0]
link = args['link'][0]
year = args['year'][0]
from resources.lib.modules import favourites
favourites.add_favourite_show(name,link,year)
elif mode[0]=='rem_tv_fav':
title = args['show'][0]
link = args['link'][0]
from resources.lib.modules import favourites
favourites.remove_tv_fav(title,link)
xbmc.executebuiltin("Container.Refresh")
elif mode[0]=='del_tv_all':
confirm = control.yesnoDialog(control.lang(30169).encode('utf-8'),control.lang(30401).encode('utf-8'),'')
if confirm==1:
from resources.lib.modules import favourites
favourites.delete_all_tv_favs()
xbmc.executebuiltin("Container.Refresh")
control.infoDialog(control.lang(30402).encode('utf-8'))
elif mode[0]=='search':
addon.add_item({'mode': 'open_key_search'}, {'title':'[COLOR green]%s[/COLOR]'%control.lang(30404).encode('utf-8')}, img=icon_path('Search.png'), fanart=fanart,is_folder=True)
from resources.lib.modules import favourites
queries = favourites.get_search_history('tv')
del_url = addon.build_plugin_url({'mode': 'del_his_tv'})
context = [(control.lang(30143).encode('utf-8'),'RunPlugin(%s)'%del_url)]
for q in queries:
addon.add_item({'mode': 'open_search', 'q': q, 'page':'1'}, {'title':q}, img=icon_path('Search.png'), fanart=fanart,contextmenu_items=context, is_folder=True)
addon.end_of_directory()
elif mode[0]=='open_key_search':
q = control.get_keyboard(control.lang(30403).encode('utf-8'))
if q:
from resources.lib.modules import favourites
url = addon.build_plugin_url({'mode':'open_search','q':q,'page':'1'})
favourites.add_search_query(q,'tv')
xbmc.executebuiltin("Container.Refresh")
import time
time.sleep(2)
control.execute('Container.Update(%s)'%url)
elif mode[0]=='open_search':
url = url_sh = args['q'][0]
page = int(args['page'][0])
shows = teevee2.search(url)
last = False
if paginated and meta_enabled:
if len(shows)<=offset:
last=True
pass
else:
start = (page-1)*offset
end = start + offset
if (end+1) >= len(shows):
last = True
end = len(shows) - 1
shows = shows[start:end]
total = len(shows)
for show in shows:
url,title,year = show
meta = metadata.get_show_meta(title,url,year=year)
context = teevee2.get_tv_context(title,url,year,False)
addon.add_item({'mode': 'open_show', 'url': url,'title': title}, meta,img=meta['cover_url'], fanart=meta['backdrop_url'],contextmenu_items=context, total_items=total,is_folder=True)
if paginated and meta_enabled and not last:
addon.add_item({'mode': 'open_search', 'q':url, 'page':'%s'%(page+1)}, {'title':control.lang(30171).encode('utf-8')}, img=icon_path('Next.png'), fanart=fanart,is_folder=True)
addon.end_of_directory()
elif mode[0]=='del_his_tv':
from resources.lib.modules import favourites
favourites.delete_history('tv')
xbmc.executebuiltin("Container.Refresh")
control.infoDialog(control.lang(30402).encode('utf-8'))
elif mode[0]=='clear_cache':
cache.clear()
|
gpl-3.0
| 4,522,614,937,469,297,000
| 37.315582
| 212
| 0.558928
| false
| 3.661767
| false
| false
| false
|
timj/scons
|
src/engine/SCons/Tool/rpm.py
|
1
|
4470
|
"""SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should be a
tar.gz consisting of the source file and a specfile.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
tar_file_with_included_specfile = source
if SCons.Util.is_List(source):
tar_file_with_included_specfile = source[0]
return "%s %s %s"%(env['RPM'], env['RPMFLAGS'],
tar_file_with_included_specfile.get_abspath() )
def build_rpm(target, source, env):
# create a temporary rpm build root.
tmpdir = os.path.join( os.path.dirname( target[0].get_abspath() ), 'rpmtemp' )
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
# now create the mandatory rpm directory structure.
for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
os.makedirs( os.path.join( tmpdir, d ) )
# set the topdir as an rpmflag.
env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )
# now call rpmbuild to create the rpm package.
handle = subprocess.Popen(get_cmd(source, env),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
output = handle.stdout.read()
status = handle.wait()
if status:
raise SCons.Errors.BuildError( node=target[0],
errstr=output,
filename=str(target[0]) )
else:
# XXX: assume that LC_ALL=C is set while running rpmbuild
output_files = re.compile( 'Wrote: (.*)' ).findall( output )
for output, input in zip( output_files, target ):
rpm_output = os.path.basename(output)
expected = os.path.basename(input.get_path())
assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
shutil.copy( output, input.get_abspath() )
# cleanup before leaving.
shutil.rmtree(tmpdir)
return status
def string_rpm(target, source, env):
try:
return env['RPMCOMSTR']
except KeyError:
return get_cmd(source, env)
rpmAction = SCons.Action.Action(build_rpm, string_rpm)
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
source_scanner = SCons.Defaults.DirScanner,
suffix = '$RPMSUFFIX')
def generate(env):
"""Add Builders and construction variables for rpm to an Environment."""
try:
bld = env['BUILDERS']['Rpm']
except KeyError:
bld = RpmBuilder
env['BUILDERS']['Rpm'] = bld
env.SetDefault(RPM = 'LC_ALL=C rpmbuild')
env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))
env.SetDefault(RPMCOM = rpmAction)
env.SetDefault(RPMSUFFIX = '.rpm')
def exists(env):
return env.Detect('rpmbuild')
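# Hypothetical SConstruct sketch (not part of this module): the Rpm builder is
# handed a tar.gz that already contains the spec file; all names are illustrative.
#   env = Environment(tools=['default', 'rpm'])
#   env.Rpm('mypackage-1.0-1.noarch.rpm', 'mypackage-1.0.tar.gz')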
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
| 5,327,432,468,304,638,000
| 32.863636
| 92
| 0.650783
| false
| 3.914186
| false
| false
| false
|
svieira/Flask-HipPocket
|
flask_hippocket/tasks.py
|
1
|
3301
|
# -*- coding: utf-8 -*-
"""
flask.ext.hippocket.tasks
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013 by Sean Vieira.
:license: MIT, see LICENSE for more details.
"""
from flask import Blueprint, Markup, request, render_template
from itertools import chain
from os import path
from pkgutil import walk_packages
from werkzeug.utils import import_string
from werkzeug.exceptions import default_exceptions, HTTPException
def autoload(app, apps_package="apps", module_name="routes", blueprint_name="routes", on_error=None):
"""Automatically load Blueprints from the specified package and registers them with Flask."""
if not apps_package:
raise ValueError("No apps package provided - unable to begin autoload")
if isinstance(apps_package, basestring):
package_code = import_string(apps_package)
else:
#: `apps_package` can be the already imported parent package
#: (i.e. the following is a licit pattern)::
#:
#: import app_package
#: # do something else with app_package
#: autoload(app, app_package)
package_code = apps_package
apps_package = apps_package.__name__
package_paths = package_code.__path__
package_paths = [path.join(app.root_path, p) for p in package_paths]
root = apps_package
apps_package = apps_package + u"." if not apps_package.endswith(".") else apps_package
if on_error is None:
on_error = lambda name: app.logger.warn("Unable to import {name}.".format(name=name))
_to_import = "{base}.{module}.{symbol}"
import_template = lambda base: _to_import.format(base=base,
module=module_name,
symbol=blueprint_name)
#: Autoloaded apps must be Python packages
#: The root of the package is also inspected for a routing file
package_contents = chain([[None, root, True]],
walk_packages(path=package_paths, prefix=apps_package, onerror=on_error))
for _, sub_app_name, is_pkg in package_contents:
if not is_pkg:
continue
sub_app_import_path = import_template(base=sub_app_name)
sub_app = import_string(sub_app_import_path)
if isinstance(sub_app, Blueprint):
app.register_blueprint(sub_app)
else:
app.logger.warn(("Failed to register {name} - "
"it does not match the registration pattern.").format(name=sub_app_name))
def setup_errors(app, error_template="errors.html"):
"""Add a handler for each of the available HTTP error responses."""
def error_handler(error):
if isinstance(error, HTTPException):
description = error.get_description(request.environ)
code = error.code
name = error.name
else:
description = error
code = 500
name = "Internal Server Error"
return render_template(error_template,
code=code,
name=Markup(name),
description=Markup(description))
for exception in default_exceptions:
app.register_error_handler(exception, error_handler)
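# Illustrative usage sketch (an assumption, mirroring the pattern described in the
# autoload docstring above):
#
#   from flask import Flask
#   app = Flask(__name__)
#   autoload(app, apps_package="apps")  # registers apps.<name>.routes:routes blueprints
#   setup_errors(app)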
|
mit
| -5,610,174,353,615,824,000
| 37.383721
| 105
| 0.604059
| false
| 4.354881
| false
| false
| false
|
mscherer/rpmlint
|
InitScriptCheck.py
|
1
|
11946
|
# -*- coding: utf-8 -*-
#############################################################################
# Project : Mandriva Linux
# Module : rpmlint
# File : InitScriptCheck.py
# Author : Frederic Lepied
# Created On : Fri Aug 25 09:26:37 2000
# Purpose : check init scripts (files in /etc/rc.d/init.d)
#############################################################################
import os
import re
import sys
import rpm
from Filter import addDetails, printError, printWarning
import AbstractCheck
import Config
import Pkg
chkconfig_content_regex = re.compile('^\s*#\s*chkconfig:\s*([-0-9]+)\s+[-0-9]+\s+[-0-9]+')
subsys_regex = re.compile('/var/lock/subsys/([^/"\'\n\s;&|]+)', re.MULTILINE)
chkconfig_regex = re.compile('^[^#]*(chkconfig|add-service|del-service)', re.MULTILINE)
status_regex = re.compile('^[^#]*status', re.MULTILINE)
reload_regex = re.compile('^[^#]*reload', re.MULTILINE)
use_deflevels = Config.getOption('UseDefaultRunlevels', True)
lsb_tags_regex = re.compile('^# ([\w-]+):\s*(.*?)\s*$')
lsb_cont_regex = re.compile('^#(?:\t| )(.*?)\s*$')
use_subsys = Config.getOption('UseVarLockSubsys', True)
LSB_KEYWORDS = ('Provides', 'Required-Start', 'Required-Stop', 'Should-Start',
'Should-Stop', 'Default-Start', 'Default-Stop',
'Short-Description', 'Description')
RECOMMENDED_LSB_KEYWORDS = ('Provides', 'Required-Start', 'Required-Stop',
'Default-Stop', 'Short-Description')
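# Illustrative LSB comment block of the kind the check below parses (an assumption
# constructed from the keywords and regexes above, not taken from a real package):
#
#   ### BEGIN INIT INFO
#   # Provides:          mydaemon
#   # Required-Start:    $network
#   # Required-Stop:     $network
#   # Default-Start:
#   # Default-Stop:      0 1 2 6
#   # Short-Description: example daemon
#   ### END INIT INFO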
class InitScriptCheck(AbstractCheck.AbstractCheck):
def __init__(self):
AbstractCheck.AbstractCheck.__init__(self, 'InitScriptCheck')
def check_binary(self, pkg):
initscript_list = []
for fname, pkgfile in pkg.files().items():
if not fname.startswith('/etc/init.d/') and \
not fname.startswith('/etc/rc.d/init.d/'):
continue
basename = os.path.basename(fname)
initscript_list.append(basename)
if pkgfile.mode & int("500", 8) != int("500", 8):
printError(pkg, 'init-script-non-executable', fname)
if "." in basename:
printError(pkg, 'init-script-name-with-dot', fname)
# check chkconfig call in %post and %preun
postin = pkg[rpm.RPMTAG_POSTIN] or \
pkg.scriptprog(rpm.RPMTAG_POSTINPROG)
if not postin:
printError(pkg, 'init-script-without-chkconfig-postin', fname)
elif not chkconfig_regex.search(postin):
printError(pkg, 'postin-without-chkconfig', fname)
preun = pkg[rpm.RPMTAG_PREUN] or \
pkg.scriptprog(rpm.RPMTAG_PREUNPROG)
if not preun:
printError(pkg, 'init-script-without-chkconfig-preun', fname)
elif not chkconfig_regex.search(preun):
printError(pkg, 'preun-without-chkconfig', fname)
status_found = False
reload_found = False
chkconfig_content_found = False
subsys_regex_found = False
in_lsb_tag = False
in_lsb_description = False
lastline = ''
lsb_tags = {}
# check common error in file content
content = None
try:
content = [x for x in Pkg.readlines(pkgfile.path)]
except Exception:
e = sys.exc_info()[1]
printWarning(pkg, 'read-error', e)
continue
content_str = "".join(content)
for line in content:
line = line[:-1] # chomp
# TODO check if there is only one line like this
if line.startswith('### BEGIN INIT INFO'):
in_lsb_tag = True
continue
if line.endswith('### END INIT INFO'):
in_lsb_tag = False
for kw, vals in lsb_tags.items():
if len(vals) != 1:
printError(pkg, 'redundant-lsb-keyword', kw)
for kw in RECOMMENDED_LSB_KEYWORDS:
if kw not in lsb_tags:
printWarning(pkg, 'missing-lsb-keyword',
"%s in %s" % (kw, fname))
if in_lsb_tag:
# TODO maybe we do not have to handle this ?
if lastline.endswith('\\'):
line = lastline + line
else:
res = lsb_tags_regex.search(line)
if not res:
cres = lsb_cont_regex.search(line)
if not (in_lsb_description and cres):
in_lsb_description = False
printError(
pkg, 'malformed-line-in-lsb-comment-block',
line)
else:
lsb_tags["Description"][-1] += \
" " + cres.group(1)
else:
tag = res.group(1)
if not tag.startswith('X-') and \
tag not in LSB_KEYWORDS:
printError(pkg, 'unknown-lsb-keyword', line)
else:
in_lsb_description = (tag == 'Description')
if tag not in lsb_tags:
lsb_tags[tag] = []
lsb_tags[tag].append(res.group(2))
lastline = line
if not status_found and status_regex.search(line):
status_found = True
if not reload_found and reload_regex.search(line):
reload_found = True
res = chkconfig_content_regex.search(line)
if res:
chkconfig_content_found = True
if use_deflevels:
if res.group(1) == '-':
printWarning(pkg, 'no-default-runlevel', fname)
elif res.group(1) != '-':
printWarning(pkg, 'service-default-enabled', fname)
res = subsys_regex.search(line)
if res:
subsys_regex_found = True
name = res.group(1)
if use_subsys and name != basename:
error = True
if name[0] == '$':
value = Pkg.substitute_shell_vars(name,
content_str)
if value == basename:
error = False
else:
i = name.find('}')
if i != -1:
name = name[0:i]
error = name != basename
if error and len(name):
if name[0] == '$':
printWarning(pkg, 'incoherent-subsys', fname,
name)
else:
printError(pkg, 'incoherent-subsys', fname,
name)
if "Default-Start" in lsb_tags:
if "".join(lsb_tags["Default-Start"]):
printWarning(pkg, 'service-default-enabled', fname)
if not status_found:
printError(pkg, 'no-status-entry', fname)
if not reload_found:
printWarning(pkg, 'no-reload-entry', fname)
if not chkconfig_content_found:
printError(pkg, 'no-chkconfig-line', fname)
if not subsys_regex_found and use_subsys:
printError(pkg, 'subsys-not-used', fname)
elif subsys_regex_found and not use_subsys:
printError(pkg, 'subsys-unsupported', fname)
if len(initscript_list) == 1:
pkgname = re.sub("-sysvinit$", "", pkg.name.lower())
goodnames = (pkgname, pkgname + 'd')
if initscript_list[0] not in goodnames:
printWarning(pkg, 'incoherent-init-script-name',
initscript_list[0], str(goodnames))
# Create an object to enable the auto registration of the test
check = InitScriptCheck()
addDetails(
'init-script-without-chkconfig-postin',
'''The package contains an init script but doesn't contain a %post with
a call to chkconfig.''',
'postin-without-chkconfig',
'''The package contains an init script but doesn't call chkconfig in its
%post script.''',
'init-script-without-chkconfig-preun',
'''The package contains an init script but doesn't contain a %preun with
a call to chkconfig.''',
'preun-without-chkconfig',
'''The package contains an init script but doesn't call chkconfig in its
%preun script.''',
'missing-lsb-keyword',
'''The package contains an init script that does not contain one of the LSB
init script comment block convention keywords that are recommendable for all
init scripts. If there is nothing to add to a keyword's value, include the
keyword in the script with an empty value. Note that as of version 3.2, the
LSB specification does not mandate presence of any keywords.''',
'no-status-entry',
'''In your init script (/etc/rc.d/init.d/your_file), you don't
have a 'status' entry, which is necessary for good functionality.''',
'no-reload-entry',
'''In your init script (/etc/rc.d/init.d/your_file), you don't
have a 'reload' entry, which is necessary for good functionality.''',
'no-chkconfig-line',
'''The init script doesn't contain a chkconfig line to specify the runlevels
at which to start and stop it.''',
'no-default-runlevel',
'''The default runlevel isn't specified in the init script.''',
'service-default-enabled',
'''The service is enabled by default after "chkconfig --add"; for security
reasons, most services should not be. Use "-" as the default runlevel in the
init script's "chkconfig:" line and/or remove the "Default-Start:" LSB keyword
to fix this if appropriate for this service.''',
'subsys-unsupported',
'''The init script uses /var/lock/subsys which is not supported by
this distribution.''',
'subsys-not-used',
'''While your daemon is running, you have to put a lock file in
/var/lock/subsys/. To see an example, look at this directory on your
machine and examine the corresponding init scripts.''',
'incoherent-subsys',
'''The filename of your lock file in /var/lock/subsys/ is incoherent
with your actual init script name. For example, if your script name
is httpd, you have to use 'httpd' as the filename in your subsys directory.
It is also possible that rpmlint gets this wrong, especially if the init
script contains nontrivial shell variables and/or assignments. These
cases usually manifest themselves when rpmlint reports that the subsys name
starts with a '$'; in these cases a warning instead of an error is reported
and you should check the script manually.''',
'incoherent-init-script-name',
'''The init script name should be the same as the package name in lower case,
or one with 'd' appended if it invokes a process by that name.''',
'init-script-name-with-dot',
'''The init script name should not contain a dot in its name. Some versions
of chkconfig don't work as expected with init script names like that.''',
'init-script-non-executable',
'''The init script should have at least the execution bit set for root
in order for it to run at boot time.''',
)
# InitScriptCheck.py ends here
# Local variables:
# indent-tabs-mode: nil
# py-indent-offset: 4
# End:
# ex: ts=4 sw=4 et
|
gpl-2.0
| -7,705,326,315,198,297,000
| 40.915789
| 90
| 0.536665
| false
| 4.215243
| true
| false
| false
|
RENCI/xDCIShare
|
hs_metrics/views.py
|
1
|
5571
|
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from django.contrib.auth.models import User
from mezzanine.generic.models import Rating, ThreadedComment
from theme.models import UserProfile # fixme switch to party model
from hs_core import hydroshare
from collections import Counter
class xDCIShareSiteMetrics(TemplateView):
template_name = 'hs_metrics/hydrosharesitemetrics.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(xDCIShareSiteMetrics, self).dispatch(request, *args, **kwargs)
def __init__(self, **kwargs):
super(xDCIShareSiteMetrics, self).__init__(**kwargs)
self.n_registered_users = User.objects.all().count()
self.n_host_institutions = 0
self.host_institutions = set()
self.n_users_logged_on = None # fixme need to track
self.max_logon_duration = None # fixme need to track
self.n_courses = 0
self.n_agencies = 0
self.agencies = set()
        self.n_core_contributors = 6 # fixme need to track (use GitHub API Key) https://api.github.com/teams/328946
self.n_extension_contributors = 10 # fixme need to track (use GitHub API Key) https://api.github.com/teams/964835
self.n_citations = 0 # fixme hard to quantify
self.resource_type_counts = Counter()
self.user_titles = Counter()
self.user_professions = Counter()
self.user_subject_areas = Counter()
self.n_ratings = 0
self.n_comments = 0
self.n_resources = 0
def get_context_data(self, **kwargs):
"""
1. Number of registered users (with voluntarily supplied demography and diversity)
2. Number of host institutions (with demography).
3. Use statistics (for each month number and average log-on duration, maximum number of users logged on, total
CPU hours of model run time by different compute resources).
4. Number of courses and students using educational material (with demography and diversity based on user
information).
5. Number of ratings and comments about resources.
6. The quantity of hydrological data including data values, sites, and variables, and web service data requests
per day.
7. The number of non-CUAHSI agencies that utilize xDCIShare (e.g. NCDC).
8. The number of contributors to the core infrastructure code base.
9. The number of contributors to non-core code that is part of the system, such as clients or apps and other
software projects where changes are made to adapt for xDCIShare
10. The number of downloads of releases of clients and apps.
11. The number of users trained during the various outreach activities.
12. Number of papers submitted to and published in peer reviewed forums about this project or using the
infrastructure of this project. To the extent possible these will be stratified demographically and based
on whether they report contributions that are domain research or cyberinfrastructure. We will also measure
posters, invited talks, panel sessions, etc. We will also track citations generated by these papers.
13. Number of citations of various xDCIShare resources.
14. The types and amounts of resources stored within the system, and their associated downloads (resource types
will include data of varying type, model codes, scripts, workflows and documents).
:param kwargs:
:return:
"""
ctx = super(xDCIShareSiteMetrics, self).get_context_data(**kwargs)
self.get_resource_stats()
self.get_user_stats()
self.user_professions = self.user_professions.items()
self.user_subject_areas = self.user_subject_areas.items()
self.resource_type_counts = self.resource_type_counts.items()
self.user_titles = self.user_titles.items()
ctx['metrics'] = self
return ctx
def get_all_resources(self):
"""Yield all resources in the system as a single generator"""
resource_types = hydroshare.get_resource_types()
for qs in (res_model.objects.all() for res_model in resource_types):
for resource in qs:
yield resource
def get_resource_stats(self):
for resource in self.get_all_resources():
resource_type_name = resource._meta.verbose_name if hasattr(resource._meta, 'verbose_name') else resource._meta.model_name
self.resource_type_counts[resource_type_name] += 1
self.n_resources += 1
self.n_ratings = Rating.objects.all().count()
self.n_comments = ThreadedComment.objects.all().count()
def get_user_stats(self):
# FIXME revisit this with the hs_party application
for profile in UserProfile.objects.all():
if profile.organization_type in ('Government','Commercial'):
self.agencies.add(profile.organization)
else:
self.host_institutions.add(profile.organization)
self.user_professions[profile.profession] += 1
self.user_titles[profile.title] += 1
if profile.subject_areas:
self.user_subject_areas.update(a.strip() for a in profile.subject_areas.split(','))
self.n_host_institutions = len(self.host_institutions)
self.n_agencies = len(self.agencies)
|
bsd-3-clause
| -434,498,257,057,787,260
| 49.189189
| 134
| 0.675821
| false
| 4.051636
| false
| false
| false
|
willemarcel/pontocerto
|
pontocerto/core/views.py
|
1
|
1215
|
from rest_framework.generics import ListAPIView
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from django.forms import Form, CharField, FloatField
from django.http import HttpResponseRedirect
from django.views.generic.edit import FormView
from .models import Ponto
from .serializers import PontoSerializer
class GeojsonPontoList(ListAPIView):
queryset = Ponto.objects.all()
serializer_class = PontoSerializer
class PointForm(Form):
nome = CharField(max_length=100, required=False)
lat = FloatField(label="Latitude")
lon = FloatField(label="Longitude")
class CreatePointView(FormView):
template_name = 'core/create_point.html'
form_class = PointForm
def form_valid(self, form):
ponto = Ponto.objects.create(
nome=form.cleaned_data.get('nome'),
location=Point(
form.cleaned_data.get('lon'),
form.cleaned_data.get('lat')
)
)
url = reverse(
'admin:{0}_{1}_change'.format(
ponto._meta.app_label, ponto._meta.model_name
), args=(ponto.pk,)
)
return HttpResponseRedirect(url)
|
agpl-3.0
| -8,883,890,015,690,834,000
| 28.634146
| 61
| 0.655144
| false
| 3.983607
| false
| false
| false
|
fernandog/Medusa
|
medusa/server/web/home/post_process.py
|
1
|
1966
|
# coding=utf-8
from __future__ import unicode_literals
from medusa import process_tv
from medusa.helper.encoding import ss
from medusa.server.web.core import PageTemplate
from medusa.server.web.home.handler import Home
from six import string_types
from tornroutes import route
@route('/home/postprocess(/?.*)')
class HomePostProcess(Home):
def __init__(self, *args, **kwargs):
super(HomePostProcess, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, filename='home_postprocess.mako')
return t.render(topmenu='home', controller='home', action='postProcess')
def processEpisode(self, proc_dir=None, nzbName=None, jobName=None, quiet=None, process_method=None, force=None,
is_priority=None, delete_on='0', failed='0', proc_type='auto', ignore_subs=None, *args, **kwargs):
def argToBool(argument):
if isinstance(argument, string_types):
_arg = argument.strip().lower()
else:
_arg = argument
if _arg in ['1', 'on', 'true', True]:
return True
elif _arg in ['0', 'off', 'false', False]:
return False
return argument
if not proc_dir:
return self.redirect('/home/postprocess/')
else:
resource_name = ss(nzbName) if nzbName else None
result = process_tv.ProcessResult(ss(proc_dir), process_method=process_method).process(
resource_name=resource_name, force=argToBool(force), is_priority=argToBool(is_priority),
                delete_on=argToBool(delete_on), failed=argToBool(failed), proc_type=proc_type,
ignore_subs=argToBool(ignore_subs)
)
if quiet is not None and int(quiet) == 1:
return result
result = result.replace('\n', '<br>\n')
return self._genericMessage('Postprocessing results', result)
|
gpl-3.0
| -5,933,431,452,205,526,000
| 34.107143
| 121
| 0.606307
| false
| 3.870079
| false
| false
| false
|
CalthorpeAnalytics/urbanfootprint
|
footprint/main/management/commands/clear_future_built_forms.py
|
1
|
1703
|
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from optparse import make_option
import logging
from django.core.management.base import BaseCommand
from footprint.main.models.config.scenario import FutureScenario
from footprint.main.models.keys.keys import Keys
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
This command clears all layer_selections
"""
option_list = BaseCommand.option_list + (
make_option('-r', '--resave', action='store_true', default=False,
help='Resave all the config_entities to trigger signals'),
make_option('--scenario', default='', help='String matching a key of or more Scenario to run'),
)
def handle(self, *args, **options):
scenarios = FutureScenario.objects.filter(key__contains=options['scenario']) if options[
'scenario'] else FutureScenario.objects.all()
for scenario in scenarios:
future_scenario_feature_class = scenario.db_entity_feature_class(DbEntityKey.FUTURE_SCENARIO)
for future_scenario_feature in future_scenario_feature_class.objects.exclude(built_form__isnull=True):
future_scenario_feature.built_form = None
future_scenario_feature.save()
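# Illustrative invocation sketch (an assumption based on the command's filename and
# options above):
#
#   python manage.py clear_future_built_forms --scenario=base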
|
gpl-3.0
| -1,502,007,972,385,661,000
| 39.547619
| 114
| 0.711685
| false
| 4.064439
| false
| false
| false
|
frostasm/qt-creator
|
tests/system/suite_editors/tst_rename_macros/test.py
|
1
|
7824
|
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
def main():
global cppEditorStr
folder = prepareTemplate(os.path.abspath(os.path.join(os.getcwd(), "..", "shared",
"simplePlainCPP")))
if folder == None:
test.fatal("Could not prepare test files - leaving test")
return
proFile = os.path.join(folder, "testfiles.pro")
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
openQmakeProject(proFile)
if not testRenameMacroAfterSourceModification():
return
headerName = "anothertestfile.h"
addCPlusPlusFileToCurrentProject(headerName, "C++ Header File",
expectedHeaderName=headerName)
if not testRenameMacroAfterSourceMoving():
return
invokeMenuItem("File", "Save All")
invokeMenuItem("File", "Exit")
def testRenameMacroAfterSourceModification():
def __deleteAnyClass__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 5)
type(cppEditorStr, "<Delete>")
test.log("Testing rename macro after modifying source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"class AnyClass", __deleteAnyClass__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Sources.testfile\\.cpp", "SOME_MACRO_NAME(a)")
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
performMacroRenaming('SOME_OTHER_MACRO_NAME')
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "SOME_OTHER_MACRO_NAME")
revertChanges(formerTexts)
return True
def testRenameMacroAfterSourceMoving():
def __cut__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 4)
invokeMenuItem("Edit", "Cut")
def __paste__():
global cppEditorStr
type(cppEditorStr, "<Return>")
invokeMenuItem("Edit", "Paste")
def __insertInclude__():
global cppEditorStr
typeLines(cppEditorStr, ['', '#include "anothertestfile.h"'])
test.log("Testing rename macro after moving source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"#define SOME_MACRO_NAME( X )\\", __cut__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Headers.anothertestfile\\.h",
"#define ANOTHERTESTFILE_H", __paste__)
if not content:
return False
formerTexts["testfiles.Headers.anothertestfile\\.h"] = content
content = openDocumentPlaceCursor('testfiles.Sources.testfile\\.cpp',
'#include "testfile.h"', __insertInclude__)
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
placeCursorToLine(cppEditorStr, "SOME_MACRO_NAME(a)")
performMacroRenaming("COMPLETELY_DIFFERENT_MACRO_NAME")
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "COMPLETELY_DIFFERENT_MACRO_NAME")
revertChanges(formerTexts)
return True
def performMacroRenaming(newMacroName):
for i in range(10):
type(cppEditorStr, "<Left>")
invokeContextMenuItem(waitForObject(cppEditorStr), "Refactor",
"Rename Symbol Under Cursor")
waitForSearchResults()
validateSearchResult(2)
replaceLineEdit = waitForObject("{leftWidget={text='Replace with:' type='QLabel' "
"unnamed='1' visible='1'} "
"type='Core::Internal::WideEnoughLineEdit' unnamed='1' "
"visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
replaceEditorContent(replaceLineEdit, newMacroName)
clickButton(waitForObject("{text='Replace' type='QToolButton' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}"))
def verifyChangedContent(origTexts, replacedSymbol, replacement):
global cppEditorStr
successfullyCompared = []
for fileName,text in origTexts.iteritems():
if openDocument(fileName):
successfullyCompared.append(test.compare(waitForObject(cppEditorStr).plainText,
text.replace(replacedSymbol, replacement),
"Verifying content of %s" %
simpleFileName(fileName)))
else:
successfullyCompared.append(False)
test.fail("Failed to open document %s" % simpleFileName(fileName))
if successfullyCompared.count(True) == len(origTexts):
test.passes("Successfully compared %d changed files" % len(origTexts))
else:
test.fail("Verified %d files - %d have been successfully changed and %d failed to "
"change correctly." % (len(origTexts), successfullyCompared.count(True),
successfullyCompared.count(False)))
def revertChanges(files):
for f in files:
simpleName = simpleFileName(f)
if openDocument(f):
try:
invokeMenuItem('File', 'Revert "%s" to Saved' % simpleName)
clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
test.log("Reverted changes inside %s" % simpleName)
except:
test.warning("File '%s' cannot be reverted." % simpleName,
"Maybe it has not been changed at all.")
else:
test.fail("Could not open %s for reverting changes" % simpleName)
|
gpl-3.0
| -6,147,371,400,253,073,000
| 44.488372
| 95
| 0.617587
| false
| 4.356347
| true
| false
| false
|
viapath/zippy
|
zippy/zippylib/interval.py
|
1
|
4651
|
#!/usr/bin/env python
__doc__ = """Interval Lists"""
__author__ = "David Brawand"
__license__ = "MIT"
__version__ = "2.3.4"
__maintainer__ = "David Brawand"
__email__ = "dbrawand@nhs.net"
__status__ = "Production"
import sys
from math import ceil
class Interval(object):
def __init__(self,chrom,chromStart,chromEnd,name=None,reverse=None,sample=None):
self.chrom = chrom
self.chromStart = int(chromStart)
self.chromEnd = int(chromEnd)
        assert self.chromStart <= self.chromEnd # make sure it's on the forward genomic strand
self.name = name if name else chrom+':'+str(chromStart)+'-'+str(chromEnd)
self.strand = 0 if reverse is None else -1 if reverse else 1
self.sample = sample
self.subintervals = IntervalList([])
return
def midpoint(self):
return int(self.chromStart + (self.chromEnd - self.chromStart)/2.0)
def locus(self):
'''returns interval of variant'''
return ( self.chrom, self.chromStart, self.chromEnd )
def __hash__(self):
return hash(str(self))
def __len__(self):
return self.chromEnd - self.chromStart
def __eq__(self,other):
return hash(self) == hash(other)
def __lt__(self,other):
return (self.chrom, self.chromStart, self.chromEnd) < (other.chrom, other.chromStart, other.chromEnd)
def __repr__(self):
return "<Interval ("+self.name+") "+self.chrom+":"+str(self.chromStart)+'-'+str(self.chromEnd)+ \
" ["+str(self.strand)+"] len:"+str(len(self))+">"
def __str__(self):
return "\t".join(map(str,[self.chrom, self.chromStart, self.chromEnd, self.name]))
def tile(self,i,o,suffix=True): # interval, overlap
splitintervals = int(ceil( (len(self)-o) / float(i-o) )) # interval number
optimalsize = int(ceil( (len(self) + splitintervals*o - o) / float(splitintervals) )) # optimal interval size
# get tile spans (and number of exons)
tilespan = []
for n,tilestart in enumerate(range(self.chromStart, self.chromEnd, optimalsize-o)):
tileend = min(tilestart+optimalsize, self.chromEnd)
tilespan.append((tilestart,tileend))
if tileend == self.chromEnd:
break
tiles = []
for n,t in enumerate(tilespan):
tilenumber = len(tilespan)-n if self.strand < 0 else n+1
tiles.append(Interval(self.chrom,t[0],t[1],self.name+'_'+str(tilenumber) if suffix else None, self.strand < 0))
return tiles
def extend(self,flank):
self.chromStart = self.chromStart-flank if flank <= self.chromStart else 0
self.chromEnd = self.chromEnd+flank
return self
    def overlap(self,other): # also returns True for bookended (adjacent) intervals
return self.chrom == other.chrom and \
not (other.chromEnd < self.chromStart or other.chromStart > self.chromEnd)
def merge(self,other,subintervals=False):
if self.chrom == other.chrom and self.strand == other.strand:
self.chromStart = other.chromStart if other.chromStart < self.chromStart else self.chromStart
self.chromEnd = other.chromEnd if other.chromEnd > self.chromEnd else self.chromEnd
self.name = self.name if other.name == self.name else self.name + '_' + other.name
if subintervals and (self.subintervals or other.subintervals):
self.subintervals += other.subintervals
self.flattenSubintervals()
def addSubintervals(self,add):
for e in add:
if e.chromStart < self.chromStart:
self.chromStart = e.chromStart
if e.chromEnd > self.chromEnd:
self.chromEnd = e.chromEnd
self.subintervals.append(e)
self.subintervals.sort()
def flattenSubintervals(self):
if self.subintervals:
self.subintervals.sort()
merged = [ self.subintervals[0] ]
for i in range(1,len(self.subintervals)):
if merged[-1].overlap(self.subintervals[i]):
merged[-1].merge(self.subintervals[i])
else:
merged.append(self.subintervals[i])
self.subintervals = IntervalList(merged)
'''list of intervals'''
class IntervalList(list):
def __init__(self,elements,source=None):
list.__init__(self, elements)
self.source = source # source of intervals
def __str__(self):
return "<IntervalList (%s) %d elements> " % (self.source, len(self))
def __repr__(self):
return "<IntervalList (%s) %d elements> " % (self.source, len(self))
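if __name__ == '__main__':
    # Illustrative sketch (an assumption, not part of the original module): tile a
    # 1 kb interval into ~400 bp tiles with a 100 bp overlap; Interval.tile() names
    # the tiles <name>_1, <name>_2, ...
    demo = Interval('chr1', 1000, 2000, name='exon1')
    for t in demo.tile(400, 100):
        print(t)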
|
mit
| -7,593,002,557,638,501,000
| 38.752137
| 123
| 0.609546
| false
| 3.665091
| false
| false
| false
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/intltool/package.py
|
1
|
2529
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Intltool(AutotoolsPackage):
"""intltool is a set of tools to centralize translation of many different
file formats using GNU gettext-compatible PO files."""
homepage = 'https://freedesktop.org/wiki/Software/intltool/'
url = 'https://launchpad.net/intltool/trunk/0.51.0/+download/intltool-0.51.0.tar.gz'
list_url = 'https://launchpad.net/intltool/+download'
version('0.51.0', '12e517cac2b57a0121cda351570f1e63')
# requires XML::Parser perl module
depends_on('perl-xml-parser', type=('build', 'run'))
depends_on('perl@5.8.1:', type=('build', 'run'))
def check(self):
# `make check` passes but causes `make install` to fail
pass
def _make_executable(self, name):
return Executable(join_path(self.prefix.bin, name))
def setup_dependent_package(self, module, dependent_spec):
# intltool is very likely to be a build dependency,
# so we add the tools it provides to the dependent module
executables = [
'intltool-extract',
'intltoolize',
'intltool-merge',
'intltool-prepare',
'intltool-update'
]
for name in executables:
setattr(module, name, self._make_executable(name))
|
lgpl-2.1
| 8,018,918,679,636,101,000
| 40.459016
| 93
| 0.652432
| false
| 3.933126
| false
| false
| false
|
wavky/ManHourCalendar
|
mhcalendar/job.py
|
1
|
1583
|
#!/usr/bin/env python3
# @Time : 17-9-2 01:53
# @Author : Wavky Huang
# @Contact : master@wavky.com
# @File : job.py
"""
Process information of the job.
"""
class Job:
def __init__(self, required_manhour=0, daily_work_hours=0, hourly_pay=0, max_daily_overhours=0):
"""
Define your job's condition.
:param required_manhour: monthly manhour required by company
:param daily_work_hours: daily work hours required by company
:param hourly_pay: hourly pay offers by company
        :param max_daily_overhours: how many hours you can work overtime per day; a negative value means unlimited
"""
self.required_manhour = required_manhour
self.daily_work_hours = daily_work_hours
self.hourly_pay = hourly_pay
self.max_daily_overhours = max_daily_overhours
if max_daily_overhours < 0:
self.max_daily_overhours = 24 - daily_work_hours
if daily_work_hours + max_daily_overhours > 24:
self.max_daily_overhours = 24 - daily_work_hours
print("daily_work_hours + max_daily_overhours > 24, max_daily_overhours has been set to {0}.".format(
self.max_daily_overhours))
def __str__(self):
return "Current Job: \t Require manhour = {0} \t Daily work hours = {1} \n\
\t\t Hourly pay = {2} \t\t Max daily overhours = {3}".format(self.required_manhour, self.daily_work_hours,
self.hourly_pay, self.max_daily_overhours)
def __repr__(self):
return self.__str__()
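if __name__ == '__main__':
    # Illustrative usage sketch (an assumption): a 160-hour month worked 8 hours a day
    # with at most 2 hours of overtime per day.
    demo_job = Job(required_manhour=160, daily_work_hours=8, hourly_pay=2000, max_daily_overhours=2)
    print(demo_job)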
|
mit
| -7,290,178,517,325,429,000
| 37.609756
| 113
| 0.60897
| false
| 3.456332
| false
| false
| false
|
exaile/exaile
|
xl/metadata/mka.py
|
1
|
2816
|
# Matroska tagger for Exaile
# Copyright (C) 2010 Johannes Sasongko <sasongko@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from xl.metadata import _base, _matroska
class MkaFormat(_base.BaseFormat):
others = False # For now, stick with defined tags only.
writable = False
tag_mapping = {
'album': ('TITLE', 50),
'album artist': ('ARTIST', 50),
'artist': ('ARTIST', 30),
'comment': ('COMMENT', 30),
'composer': ('COMPOSER', 30),
'date': ('DATE_RECORDED', 50),
'discnumber': ('PART_NUMBER', 50),
'genre': ('GENRE', 30),
'performer': ('PERFORMER', 30),
'title': ('TITLE', 30),
'tracknumber': ('PART_NUMBER', 30),
}
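    # The integers in tag_mapping are Matroska TargetTypeValues; by the Matroska
    # tagging conventions (an assumption, not stated in this file) 50 is the
    # album/collection level and 30 the track/song level, which is why TITLE and
    # PART_NUMBER map to different Exaile tags at each level.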
def _get_raw(self):
return self.tags
def load(self):
mka = _matroska.parse(self.loc)
segment = mka['Segment'][0]
info = segment['Info'][0]
try:
timecodescale = info['TimecodeScale'][0]
except KeyError:
timecodescale = 1000000
length = info['Duration'][0] * timecodescale / 1e9
self.tags = tags = {'__length': length}
for mkatags in segment['Tags']:
for mkatag in mkatags['Tag']:
target = int(mkatag['Targets'][0]['TargetTypevalue'][0])
for simpletag in mkatag['SimpleTag']:
key = (simpletag['TagName'][0], target)
try:
values = tags[key]
except KeyError:
values = tags[key] = []
values.append(simpletag['TagString'][0])
# vi: et sts=4 sw=4 ts=4
|
gpl-2.0
| 762,459,540,885,645,600
| 36.052632
| 81
| 0.622869
| false
| 3.868132
| false
| false
| false
|
pjdelport/django-cities
|
cities/conf.py
|
1
|
12698
|
# -*- coding: utf-8 -*-
from importlib import import_module
from collections import defaultdict
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
__all__ = [
'city_types', 'district_types',
'import_opts', 'import_opts_all', 'HookException', 'settings',
'ALTERNATIVE_NAME_TYPES', 'CONTINENT_DATA', 'CURRENCY_SYMBOLS',
'INCLUDE_AIRPORT_CODES', 'INCLUDE_NUMERIC_ALTERNATIVE_NAMES',
'NO_LONGER_EXISTENT_COUNTRY_CODES', 'SKIP_CITIES_WITH_EMPTY_REGIONS',
'SLUGIFY_FUNCTION', 'VALIDATE_POSTAL_CODES',
]
url_bases = {
'geonames': {
'dump': 'http://download.geonames.org/export/dump/',
'zip': 'http://download.geonames.org/export/zip/',
},
}
files = {
'country': {
'filename': 'countryInfo.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'code3',
'codeNum',
'fips',
'name',
'capital',
'area',
'population',
'continent',
'tld',
'currencyCode',
'currencyName',
'phone',
'postalCodeFormat',
'postalCodeRegex',
'languages',
'geonameid',
'neighbours',
'equivalentFips'
]
},
'region': {
'filename': 'admin1CodesASCII.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'name',
'asciiName',
'geonameid',
]
},
'subregion': {
'filename': 'admin2Codes.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'name',
'asciiName',
'geonameid',
]
},
'city': {
'filename': 'cities5000.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'geonameid',
'name',
'asciiName',
'alternateNames',
'latitude',
'longitude',
'featureClass',
'featureCode',
'countryCode',
'cc2',
'admin1Code',
'admin2Code',
'admin3Code',
'admin4Code',
'population',
'elevation',
'gtopo30',
'timezone',
'modificationDate'
]
},
'hierarchy': {
'filename': 'hierarchy.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'parent',
'child',
'type',
]
},
'alt_name': {
'filename': 'alternateNames.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'nameid',
'geonameid',
'language',
'name',
'isPreferred',
'isShort',
'isColloquial',
'isHistoric',
]
},
'postal_code': {
'filename': 'allCountries.zip',
'urls': [url_bases['geonames']['zip'] + '{filename}', ],
'fields': [
'countryCode',
'postalCode',
'placeName',
'admin1Name',
'admin1Code',
'admin2Name',
'admin2Code',
'admin3Name',
'admin3Code',
'latitude',
'longitude',
'accuracy',
]
}
}
country_codes = [
'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ',
'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ',
'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ',
'DE', 'DJ', 'DK', 'DM', 'DO', 'DZ',
'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET',
'FI', 'FJ', 'FK', 'FM', 'FO', 'FR',
'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY',
'HK', 'HM', 'HN', 'HR', 'HT', 'HU',
'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT',
'JE', 'JM', 'JO', 'JP',
'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'XK', 'KW', 'KY', 'KZ',
'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV', 'LY',
'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ',
'NA', 'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ',
'OM',
'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY',
'QA',
'RE', 'RO', 'RS', 'RU', 'RW',
'SA', 'SB', 'SC', 'SD', 'SS', 'SE', 'SG', 'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV', 'SX', 'SY', 'SZ',
'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ',
'UA', 'UG', 'UM', 'US', 'UY', 'UZ',
'VA', 'VC', 'VE', 'VG', 'VI', 'VN', 'VU',
'WF', 'WS',
'YE', 'YT',
'ZA', 'ZM', 'ZW',
]
_ALTERNATIVE_NAME_TYPES = (
('name', _("Name")),
('abbr', _("Abbreviation")),
('link', _("Link")),
)
_AIRPORT_TYPES = (
('iata', _("IATA (Airport) Code")),
('icao', _("ICAO (Airport) Code")),
('faac', _("FAAC (Airport) Code")),
)
CONTINENT_DATA = {
'AF': ('Africa', 6255146),
'AS': ('Asia', 6255147),
'EU': ('Europe', 6255148),
'NA': ('North America', 6255149),
'OC': ('Oceania', 6255151),
'SA': ('South America', 6255150),
'AN': ('Antarctica', 6255152),
}
_CURRENCY_SYMBOLS = {
"AED": "د.إ", "AFN": "؋", "ALL": "L", "AMD": "դր.", "ANG": "ƒ", "AOA": "Kz",
"ARS": "$", "AUD": "$", "AWG": "ƒ", "AZN": "m",
"BAM": "KM", "BBD": "$", "BDT": "৳", "BGN": "лв", "BHD": "ب.د", "BIF": "Fr",
"BMD": "$", "BND": "$", "BOB": "Bs.", "BRL": "R$", "BSD": "$", "BTN": "Nu",
"BWP": "P", "BYR": "Br", "BZD": "$",
"CAD": "$", "CDF": "Fr", "CHF": "Fr", "CLP": "$", "CNY": "¥", "COP": "$",
"CRC": "₡", "CUP": "$", "CVE": "$, Esc", "CZK": "Kč",
"DJF": "Fr", "DKK": "kr", "DOP": "$", "DZD": "د.ج",
"EEK": "KR", "EGP": "£,ج.م", "ERN": "Nfk", "ETB": "Br", "EUR": "€",
"FJD": "$", "FKP": "£",
"GBP": "£", "GEL": "ლ", "GHS": "₵", "GIP": "£", "GMD": "D", "GNF": "Fr",
"GTQ": "Q", "GYD": "$",
"HKD": "$", "HNL": "L", "HRK": "kn", "HTG": "G", "HUF": "Ft",
"IDR": "Rp", "ILS": "₪", "INR": "₨", "IQD": "ع.د", "IRR": "﷼", "ISK": "kr",
"JMD": "$", "JOD": "د.ا", "JPY": "¥",
"KES": "Sh", "KGS": "лв", "KHR": "៛", "KMF": "Fr", "KPW": "₩", "KRW": "₩",
"KWD": "د.ك", "KYD": "$", "KZT": "Т",
"LAK": "₭", "LBP": "ل.ل", "LKR": "ரூ", "LRD": "$", "LSL": "L", "LTL": "Lt",
"LVL": "Ls", "LYD": "ل.د",
"MAD": "د.م.", "MDL": "L", "MGA": "Ar", "MKD": "ден", "MMK": "K",
"MNT": "₮", "MOP": "P", "MRO": "UM", "MUR": "₨", "MVR": "ރ.", "MWK": "MK",
"MXN": "$", "MYR": "RM", "MZN": "MT",
"NAD": "$", "NGN": "₦", "NIO": "C$", "NOK": "kr", "NPR": "₨", "NZD": "$",
"OMR": "ر.ع.",
"PAB": "B/.", "PEN": "S/.", "PGK": "K", "PHP": "₱", "PKR": "₨", "PLN": "zł",
"PYG": "₲",
"QAR": "ر.ق",
"RON": "RON", "RSD": "RSD", "RUB": "р.", "RWF": "Fr",
"SAR": "ر.س", "SBD": "$", "SCR": "₨", "SDG": "S$", "SEK": "kr", "SGD": "$",
"SHP": "£", "SLL": "Le", "SOS": "Sh", "SRD": "$", "STD": "Db",
"SYP": "£, ل.س", "SZL": "L",
"THB": "฿", "TJS": "ЅМ", "TMT": "m", "TND": "د.ت", "TOP": "T$", "TRY": "₤",
"TTD": "$", "TWD": "$", "TZS": "Sh",
"UAH": "₴", "UGX": "Sh", "USD": "$", "UYU": "$", "UZS": "лв",
"VEF": "Bs", "VND": "₫", "VUV": "Vt",
"WST": "T",
"XAF": "Fr", "XCD": "$", "XOF": "Fr", "XPF": "Fr",
"YER": "﷼",
"ZAR": "R", "ZMK": "ZK", "ZWL": "$",
}
_NO_LONGER_EXISTENT_COUNTRY_CODES = ['CS', 'AN']
_SLUGIFY_FUNCTION = getattr(django_settings, 'CITIES_SLUGIFY_FUNCTION', 'cities.util.default_slugify')
# See http://www.geonames.org/export/codes.html
city_types = ['PPL', 'PPLA', 'PPLC', 'PPLA2', 'PPLA3', 'PPLA4', 'PPLG']
district_types = ['PPLX']
# Command-line import options
import_opts = [
'all',
'country',
'region',
'subregion',
'city',
'district',
'alt_name',
'postal_code',
]
import_opts_all = [
'country',
'region',
'subregion',
'city',
'district',
'alt_name',
'postal_code',
]
# Raise inside a hook (with an error message) to skip the current line of data.
class HookException(Exception):
pass
# Hook functions that a plugin class may define
plugin_hooks = [
'country_pre', 'country_post', # noqa: E241
'region_pre', 'region_post', # noqa: E241
'subregion_pre', 'subregion_post', # noqa: E241
'city_pre', 'city_post', # noqa: E241
'district_pre', 'district_post', # noqa: E241
'alt_name_pre', 'alt_name_post', # noqa: E241
'postal_code_pre', 'postal_code_post', # noqa: E241
]
def create_settings():
def get_locales(self):
if hasattr(django_settings, "CITIES_LOCALES"):
locales = django_settings.CITIES_LOCALES[:]
else:
locales = ['en', 'und']
try:
locales.remove('LANGUAGES')
locales += [e[0] for e in django_settings.LANGUAGES]
except:
pass
return set([e.lower() for e in locales])
res = type('settings', (), {
'locales': property(get_locales),
})
res.files = files.copy()
if hasattr(django_settings, "CITIES_FILES"):
for key in list(django_settings.CITIES_FILES.keys()):
if 'filenames' in django_settings.CITIES_FILES[key] and 'filename' in django_settings.CITIES_FILES[key]:
raise ImproperlyConfigured(
"Only one key should be specified for '%s': 'filename' of 'filenames'. Both specified instead" % key
)
res.files[key].update(django_settings.CITIES_FILES[key])
if 'filenames' in django_settings.CITIES_FILES[key]:
del res.files[key]['filename']
if hasattr(django_settings, "CITIES_DATA_DIR"):
res.data_dir = django_settings.CITIES_DATA_DIR
if hasattr(django_settings, "CITIES_POSTAL_CODES"):
res.postal_codes = set([e.upper() for e in django_settings.CITIES_POSTAL_CODES])
else:
res.postal_codes = set(['ALL'])
return res()
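# Illustrative override sketch (an assumption): a project can point the importer at a
# different GeoNames city dump by merging into the `files` structure above from its
# Django settings, e.g.
#
#   CITIES_FILES = {
#       'city': {'filename': 'cities1000.zip'},
#   }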
def create_plugins():
settings.plugins = defaultdict(list)
for plugin in django_settings.CITIES_PLUGINS:
module_path, classname = plugin.rsplit('.', 1)
module = import_module(module_path)
class_ = getattr(module, classname)
obj = class_()
[settings.plugins[hook].append(obj) for hook in plugin_hooks if hasattr(obj, hook)]
settings = create_settings()
if hasattr(django_settings, "CITIES_PLUGINS"):
create_plugins()
if hasattr(django_settings, 'CITIES_IGNORE_EMPTY_REGIONS'):
raise Exception("CITIES_IGNORE_EMPTY_REGIONS was ambiguous and has been moved to CITIES_SKIP_CITIES_WITH_EMPTY_REGIONS")
SKIP_CITIES_WITH_EMPTY_REGIONS = getattr(django_settings, 'CITIES_SKIP_CITIES_WITH_EMPTY_REGIONS', False)
# Users may want to import historical countries
NO_LONGER_EXISTENT_COUNTRY_CODES = getattr(
django_settings, 'CITIES_NO_LONGER_EXISTENT_COUNTRY_CODES',
_NO_LONGER_EXISTENT_COUNTRY_CODES)
# Users may not want to include airport codes as alternative city names
INCLUDE_AIRPORT_CODES = getattr(django_settings, 'CITIES_INCLUDE_AIRPORT_CODES', True)
if INCLUDE_AIRPORT_CODES:
_ALTERNATIVE_NAME_TYPES += _AIRPORT_TYPES
# A `Choices` object (from `django-model-utils`)
ALTERNATIVE_NAME_TYPES = getattr(django_settings, 'CITIES_ALTERNATIVE_NAME_TYPES', _ALTERNATIVE_NAME_TYPES)
INCLUDE_NUMERIC_ALTERNATIVE_NAMES = getattr(django_settings, 'CITIES_INCLUDE_NUMERIC_ALTERNATIVE_NAMES', True)
# Allow users to override specified contents
CONTINENT_DATA.update(getattr(django_settings, 'CITIES_CONTINENT_DATA', {}))
CURRENCY_SYMBOLS = getattr(django_settings, 'CITIES_CURRENCY_SYMBOLS', _CURRENCY_SYMBOLS)
module_name, _, function_name = _SLUGIFY_FUNCTION.rpartition('.')
SLUGIFY_FUNCTION = getattr(import_module(module_name), function_name)
# Users who want better postal codes can flip this on (developers of
# django-cities itself probably will), but most probably won't want to
VALIDATE_POSTAL_CODES = getattr(django_settings, 'CITIES_VALIDATE_POSTAL_CODES', False)
|
mit
| -4,502,327,756,119,079,400
| 33.479452
| 141
| 0.487167
| false
| 2.798532
| false
| false
| false
|
kamekoopa/git-lab
|
git_lab/apis/mergerequest/repositories.py
|
1
|
2549
|
# -*- coding: utf-8 -*-
from git_lab.apis.mergerequest.models import MergeRequest, Note
class MergeRequestRepository(object):
def __init__(self, client=None, project=None):
u"""
        @param client : GitLab client
@type client : gitlab.Gitlab
"""
from git_lab.utils import get_client, get_project
self.client = client if client is not None else get_client()
self.project = project if project is not None else get_project()
def get_request(self, req_id):
mr = self.client.getmergerequest(self.project, req_id)
if mr is False:
return None
else:
return MergeRequest(mr)
def get_notes(self, req_id):
        u"""Get the list of notes attached to the given merge request ID
        @param req_id : merge request ID
        @type req_id : int
        @return : list of notes
@rtype : list of Note
"""
notes = self.client.getmergerequestnotes(self.project, req_id)
if notes is False:
return []
else:
results = []
for note in notes:
results.append(Note(note))
return results
def get_requests(self, page=1, per_page=20):
u"""
        @param page : page number
        @type page : int
        @param per_page : number of items per page
        @type per_page : int
        @return : list of merge requests
@rtype : list of MergeRequest
"""
mrs = self.client.getmergerequests(self.project, page=page, per_page=per_page)
if mrs is False:
return []
else:
result = []
for mr in mrs:
result.append(MergeRequest(mr))
return result
def create_requests(self, source_branch, target_project_id, target_branch, title):
u"""
        @param source_branch : source branch
        @type source_branch : str
        @param target_project_id : target project
        @type target_project_id : str | None
        @param target_branch : target branch
        @type target_branch : str
        @param title : title
        @type title : str
        @return : success or failure
@rtype : bool
"""
return self.client.createmergerequest2(
self.project,
source_branch,
target_project_id,
target_branch,
title
)
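# Illustrative usage sketch (an assumption -- requires a configured GitLab client and
# project):
#
#   repo = MergeRequestRepository()
#   merge_requests = repo.get_requests(page=1, per_page=20)
#   notes = repo.get_notes(req_id=1)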
|
apache-2.0
| -8,188,333,652,436,813,000
| 24.150538
| 86
| 0.545105
| false
| 3.262204
| false
| false
| false
|
qateam123/eq
|
app/utilities/schema.py
|
1
|
1339
|
import logging
from app import cache
from app.parser.v0_0_1.schema_parser import SchemaParser
from app.schema_loader.schema_loader import load_schema
logger = logging.getLogger(__name__)
def get_schema(metadata):
"""
Get the schema for the current user
:return: (json, schema) # Tuple of json and schema object from schema file
"""
eq_id = metadata["eq_id"]
form_type = metadata["form_type"]
language_code = metadata["language_code"] if "language_code" in metadata else None
logger.debug("Requested questionnaire %s for form type %s", eq_id, form_type)
json_schema, schema = load_and_parse_schema(eq_id, form_type, language_code)
return json_schema, schema
@cache.memoize()
def load_and_parse_schema(eq_id, form_type, language_code):
"""
Use the schema loader to get the schema from disk. Then use the parse to construct the object schema
:param eq_id: the id of the questionnaire
:param form_type: the form type
:return: (json, schema) # Tuple of json and schema object from schema file
"""
# load the schema
json_schema = load_schema(eq_id, form_type, language_code)
if json_schema:
parser = SchemaParser(json_schema)
schema = parser.parse()
return json_schema, schema
else:
raise ValueError("No schema available")
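# Illustrative call sketch (an assumption about the metadata shape, based on the keys
# read in get_schema above):
#
#   json_schema, schema = get_schema({'eq_id': '1', 'form_type': '0205', 'language_code': 'en'})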
|
mit
| 4,259,916,970,787,906,000
| 30.880952
| 104
| 0.68708
| false
| 3.719444
| false
| false
| false
|
Smashman/UKNetrunnerRankings
|
app/tournament/models.py
|
1
|
1715
|
import datetime
from app import db
class Tournament(db.Model):
id = db.Column(db.Integer, primary_key=True)
upload_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
uploaded = db.Column(db.DateTime, default=datetime.datetime.utcnow)
date = db.Column(db.Date)
type = db.Column(db.Enum('sc', 'regi', 'nati'))
location = db.Column(db.String(1024))
mwl = db.Column(db.Boolean)
filename = db.Column(db.String(256))
file_type = db.Column(db.Enum('txt', 'json'))
participants = db.relationship('Participant', backref=db.backref('tournament'))
upload_user = db.relationship('User', backref='tournament')
def __init__(self, filename):
self.filename = filename
self.file_type = filename.split('.')[-1]
class Participant(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tournament_id = db.Column(db.Integer, db.ForeignKey('tournament.id'))
runner_ident_id = db.Column(db.Integer, db.ForeignKey('identity.id'))
corp_ident_id = db.Column(db.Integer, db.ForeignKey('identity.id'))
user = db.relationship('User', backref='participant')
runner_ident = db.relationship('Identity', foreign_keys=runner_ident_id)
corp_ident = db.relationship('Identity', foreign_keys=corp_ident_id)
class Result(db.Model):
id = db.Column(db.Integer, primary_key=True)
participant_id = db.Column(db.Integer, db.ForeignKey('participant.id'))
position = db.Column(db.Integer)
points = db.Column(db.Integer)
strength_of_schedule = db.Column(db.Float)
extended_sos = db.Column(db.Float)
participant = db.relationship('Participant', backref='result')
|
gpl-2.0
| -491,337,908,336,923,500
| 38
| 83
| 0.683965
| false
| 3.304432
| false
| false
| false
|
michael-lazar/praw3
|
setup.py
|
1
|
2609
|
"""praw setup.py"""
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = 'praw'
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, '__init__.py'),
encoding='utf-8') as fp:
VERSION = re.search("__version__ = '([^']+)'", fp.read()).group(1)
dependencies = ['decorator >=4.0.9, <4.1',
'requests >=2.4.0',
'six ==1.10']
try:
from OpenSSL import __version__ as opensslversion
opensslversion = [int(minor) if minor.isdigit() else minor
for minor in opensslversion.split('.')]
if opensslversion < [0, 15]: # versions less than 0.15 have a regression
dependencies.append('pyopenssl >=0.15')
except ImportError:
pass # open ssl not installed
setup(name=PACKAGE_NAME,
author='Timothy Mellor',
author_email='timothy.mellor+pip@gmail.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Utilities'],
description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '
'python package that allows for simple access to '
'reddit\'s API.'),
entry_points={'console_scripts': [
'praw-multiprocess = praw.multiprocess:run']},
install_requires=dependencies,
keywords='reddit api wrapper',
license='GPLv3',
long_description=README,
maintainer='Michael Lazar',
maintainer_email='lazar.michael22@gmail.com',
package_data={'': ['COPYING'], PACKAGE_NAME: ['*.ini']},
packages=[PACKAGE_NAME],
tests_require=['betamax >=0.5.1, <0.6',
'betamax-matchers >=0.2.0, <0.3',
'betamax-serializers >=0.1.1, <0.2',
'mock ==1.0.1'],
test_suite='tests',
url='https://praw.readthedocs.io/',
version=VERSION)
|
gpl-3.0
| -5,172,743,039,084,221,000
| 37.940299
| 77
| 0.579149
| false
| 3.876672
| false
| false
| false
|
sadjadasghari/deeplab4a2d
|
loss_from_log.py
|
1
|
5089
|
#!/usr/bin/env python
# Martin Kersner, 2016/03/11
from __future__ import print_function
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
from utils import strstr
def main():
output_data, log_files = process_arguments(sys.argv)
train_iteration = []
train_loss = []
train_accuracy0 = []
train_accuracy1 = []
train_accuracy2 = []
train_accuracy3 = []
train_accuracy4 = []
train_accuracy5 = []
base_train_iter = 0
for log_file in log_files:
with open(log_file, 'rb') as f:
if len(train_iteration) != 0:
base_train_iter = train_iteration[-1]
for line in f:
if strstr(line, 'Iteration') and strstr(line, 'loss'):
matched = match_loss(line)
train_loss.append(float(matched.group(1)))
matched = match_iteration(line)
train_iteration.append(int(matched.group(1))+base_train_iter)
# strong labels
elif strstr(line, 'Train net output #0: accuracy '):
matched = match_net_accuracy(line)
train_accuracy0.append(float(matched.group(1)))
elif strstr(line, 'Train net output #1: accuracy '):
matched = match_net_accuracy(line)
train_accuracy1.append(float(matched.group(1)))
elif strstr(line, 'Train net output #2: accuracy '):
matched = match_net_accuracy(line)
train_accuracy2.append(float(matched.group(1)))
# weak labels
elif strstr(line, 'Train net output #0: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy0.append(float(matched.group(1)))
elif strstr(line, 'Train net output #1: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy1.append(float(matched.group(1)))
elif strstr(line, 'Train net output #2: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy2.append(float(matched.group(1)))
elif strstr(line, 'Train net output #3: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy3.append(float(matched.group(1)))
elif strstr(line, 'Train net output #4: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy4.append(float(matched.group(1)))
elif strstr(line, 'Train net output #5: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy5.append(float(matched.group(1)))
if output_data == 'loss':
for x in train_loss:
print(x)
if output_data == 'acc1':
for x,y,z in zip(train_accuracy0, train_accuracy1, train_accuracy2):
print(x, y, z)
if output_data == 'acc2':
for x,y,z in zip(train_accuracy3, train_accuracy4, train_accuracy5):
print(x, y, z)
## loss
plt.plot(train_iteration, train_loss, 'k', label='Train loss')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Number of iterations')
plt.savefig('loss.png')
## evaluation
plt.clf()
if len(train_accuracy3) != 0:
plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='accuracy bbox 0')
plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='accuracy bbox 1')
plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='accuracy bbox 2')
plt.plot(range(len(train_accuracy3)), train_accuracy3, 'b', label='accuracy strong 0')
plt.plot(range(len(train_accuracy4)), train_accuracy4, 'c', label='accuracy strong 1')
plt.plot(range(len(train_accuracy5)), train_accuracy5, 'm', label='accuracy strong 2')
else:
plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='train accuracy 0')
plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='train accuracy 1')
plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='train accuracy 2')
plt.legend(loc=0)
plt.savefig('evaluation.png')
def match_iteration(line):
return re.search(r'Iteration (.*),', line)
def match_loss(line):
return re.search(r'loss = (.*)', line)
def match_net_accuracy(line):
return re.search(r'accuracy = (.*)', line)
def match_net_accuracy_bbox(line):
return re.search(r'accuracy_bbox = (.*)', line)
def match_net_accuracy_strong(line):
return re.search(r'accuracy_strong = (.*)', line)
def process_arguments(argv):
if len(argv) < 2:
help()
output_data = None
log_files = argv[2:]
if argv[1].lower() == 'loss':
output_data = 'loss'
elif argv[1].lower() == 'acc1':
output_data = 'acc1'
elif argv[1].lower() == 'acc2':
output_data = 'acc2'
else:
log_files = argv[1:]
return output_data, log_files
def help():
print('Usage: python loss_from_log.py [OUTPUT_TYPE] [LOG_FILE]+\n'
'OUTPUT_TYPE can be either loss, acc1 or acc2\n'
'LOG_FILE is a text file containing a log produced by caffe.\n'
'At least one LOG_FILE has to be specified.\n'
'Files have to be given in the correct order (the oldest logs first).'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
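# Usage sketch (hypothetical log file names): print the raw loss values from two
# consecutive training runs and write 'loss.png' / 'evaluation.png' to the
# current directory:
#   python loss_from_log.py loss train_part1.log train_part2.log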
|
gpl-3.0
| 5,591,932,198,186,486,000
| 31.208861
| 90
| 0.63706
| false
| 3.339239
| false
| false
| false
|
ebuendia/ProyectoPython
|
src/xml-parser.py
|
1
|
2674
|
import re
from device import Device
from group import Group
from capability import Capability
def startDevices(line):
return re.match(r"<devices",line.strip()) != None
def beginDevice(line):
return re.match(r"<device",line.strip()) != None
def endDevice(line):
return re.match(r"</device>", line.strip()) != None
def beginGroup(line):
return re.match(r"<group", line.strip()) != None
def endGroup(line):
return re.match(r"</group>", line.strip()) != None
def beginCapability(line):
return re.match(r"<capability", line.strip()) != None
def endDevices(line):
return re.match(r"</devices>", line.strip()) != None
def deleteTags(line, tag, etag):
return line.strip().replace(tag,"").replace(etag,"")
def getAttrId(line):
return line.rsplit(" ")[0].replace("id=","").replace('"',"")
def getAttrUser(line):
return line.rsplit(" ")[1].replace("user_agent=","").replace('"',"")
def getAttrFall(line):
return line.rsplit(" ")[2].replace("fall_back=","").replace('"',"")
def getAttrName(line):
return line.rsplit(" ")[0].replace("name=","").replace('"',"")
def getAttrValue(line):
return line.rsplit(" ")[1].replace("value=","").replace('"',"")
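# Worked example of the attribute helpers above, for a hypothetical line already
# stripped of its tags by deleteTags():
#   line = 'id="generic" user_agent="agent" fall_back="root"'
#   getAttrId(line)   -> 'generic'
#   getAttrUser(line) -> 'agent'
#   getAttrFall(line) -> 'root'
# Note: the space-based split assumes attribute values contain no spaces.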
# Main function
def main():
file = open("test.xml","r")
line = file.readline()
while not startDevices(line):
line = file.readline()
line = file.readline().strip()
devices = []
device = ""
group = ""
capability = ""
while not endDevices(line):
if beginDevice(line):
line = deleteTags(line,"<device ",">")
att_id = getAttrId(line)
att_user = getAttrUser(line)
att_fall = getAttrFall(line)
device = Device(att_id, att_user, att_fall)
line = file.readline()
if endDevice(line):
devices.append(device)
line = file.readline()
if beginGroup(line):
line = deleteTags(line,"<group ",">")
att_id = getAttrId(line)
group = Group(att_id)
group.setDevice(device)
line = file.readline()
if endGroup(line):
device.addGroup(group)
line = file.readline()
if beginCapability(line):
line = deleteTags(line, "<capability ", "/>")
att_name = getAttrName(line)
att_value = getAttrValue(line)
capability = Capability(att_name, att_value)
capability.setGroup(group)
group.addCapability(capability)
line = file.readline()
print "Devices\n"
printDevices(devices)
print "End Devices\n"
file.close()
return 0
def printDevices(list):
for device in list:
print device
printGroups(device)
def printGroups(device):
for group in device.getGroups():
print group
printCapabilities(group)
def printCapabilities(group):
for capability in group.getCapabilities():
print capability
if __name__ == '__main__':
main()
|
unlicense
| -1,180,188,492,776,922,400
| 21.470588
| 69
| 0.666791
| false
| 3.052511
| false
| false
| false
|
google-research/disentanglement_lib
|
disentanglement_lib/evaluation/metrics/factor_vae.py
|
1
|
8683
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the disentanglement metric from the FactorVAE paper.
Based on "Disentangling by Factorising" (https://arxiv.org/abs/1802.05983).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from disentanglement_lib.evaluation.metrics import utils
import numpy as np
from six.moves import range
import gin.tf
@gin.configurable(
"factor_vae_score",
blacklist=["ground_truth_data", "representation_function", "random_state",
"artifact_dir"])
def compute_factor_vae(ground_truth_data,
representation_function,
random_state,
artifact_dir=None,
batch_size=gin.REQUIRED,
num_train=gin.REQUIRED,
num_eval=gin.REQUIRED,
num_variance_estimate=gin.REQUIRED):
"""Computes the FactorVAE disentanglement metric.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
artifact_dir: Optional path to directory where artifacts can be saved.
batch_size: Number of points to be used to compute the training_sample.
num_train: Number of points used for training.
num_eval: Number of points used for evaluation.
num_variance_estimate: Number of points used to estimate global variances.
Returns:
Dictionary with scores:
train_accuracy: Accuracy on training set.
eval_accuracy: Accuracy on evaluation set.
"""
del artifact_dir
logging.info("Computing global variances to standardise.")
global_variances = _compute_variances(ground_truth_data,
representation_function,
num_variance_estimate, random_state)
active_dims = _prune_dims(global_variances)
scores_dict = {}
if not active_dims.any():
scores_dict["train_accuracy"] = 0.
scores_dict["eval_accuracy"] = 0.
scores_dict["num_active_dims"] = 0
return scores_dict
logging.info("Generating training set.")
training_votes = _generate_training_batch(ground_truth_data,
representation_function, batch_size,
num_train, random_state,
global_variances, active_dims)
classifier = np.argmax(training_votes, axis=0)
other_index = np.arange(training_votes.shape[1])
logging.info("Evaluate training set accuracy.")
train_accuracy = np.sum(
training_votes[classifier, other_index]) * 1. / np.sum(training_votes)
logging.info("Training set accuracy: %.2g", train_accuracy)
logging.info("Generating evaluation set.")
eval_votes = _generate_training_batch(ground_truth_data,
representation_function, batch_size,
num_eval, random_state,
global_variances, active_dims)
logging.info("Evaluate evaluation set accuracy.")
eval_accuracy = np.sum(eval_votes[classifier,
other_index]) * 1. / np.sum(eval_votes)
logging.info("Evaluation set accuracy: %.2g", eval_accuracy)
scores_dict["train_accuracy"] = train_accuracy
scores_dict["eval_accuracy"] = eval_accuracy
scores_dict["num_active_dims"] = len(active_dims)
return scores_dict
@gin.configurable("prune_dims", blacklist=["variances"])
def _prune_dims(variances, threshold=0.):
"""Mask for dimensions collapsed to the prior."""
scale_z = np.sqrt(variances)
return scale_z >= threshold
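# Illustrative example of the pruning mask (values are made up): with
# variances = np.array([0.0, 1.0, 4.0]) and threshold=0.5, the standard
# deviations are [0.0, 1.0, 2.0], so the returned mask is [False, True, True].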
def _compute_variances(ground_truth_data,
representation_function,
batch_size,
random_state,
eval_batch_size=64):
"""Computes the variance for each dimension of the representation.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observation as input and
outputs a representation.
batch_size: Number of points to be used to compute the variances.
random_state: Numpy random state used for randomness.
eval_batch_size: Batch size used to eval representation.
Returns:
Vector with the variance of each dimension.
"""
observations = ground_truth_data.sample_observations(batch_size, random_state)
representations = utils.obtain_representation(observations,
representation_function,
eval_batch_size)
representations = np.transpose(representations)
assert representations.shape[0] == batch_size
return np.var(representations, axis=0, ddof=1)
def _generate_training_sample(ground_truth_data, representation_function,
batch_size, random_state, global_variances,
active_dims):
"""Sample a single training sample based on a mini-batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observation as input and
outputs a representation.
batch_size: Number of points to be used to compute the training_sample.
random_state: Numpy random state used for randomness.
global_variances: Numpy vector with variances for all dimensions of
representation.
active_dims: Indexes of active dimensions.
Returns:
factor_index: Index of factor coordinate to be used.
argmin: Index of representation coordinate with the least variance.
"""
# Select random coordinate to keep fixed.
factor_index = random_state.randint(ground_truth_data.num_factors)
# Sample two mini batches of latent variables.
factors = ground_truth_data.sample_factors(batch_size, random_state)
# Fix the selected factor across mini-batch.
factors[:, factor_index] = factors[0, factor_index]
# Obtain the observations.
observations = ground_truth_data.sample_observations_from_factors(
factors, random_state)
representations = representation_function(observations)
local_variances = np.var(representations, axis=0, ddof=1)
argmin = np.argmin(local_variances[active_dims] /
global_variances[active_dims])
return factor_index, argmin
def _generate_training_batch(ground_truth_data, representation_function,
batch_size, num_points, random_state,
global_variances, active_dims):
"""Sample a set of training samples based on a batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
batch_size: Number of points to be used to compute the training_sample.
num_points: Number of points to be sampled for training set.
random_state: Numpy random state used for randomness.
global_variances: Numpy vector with variances for all dimensions of
representation.
active_dims: Indexes of active dimensions.
Returns:
(num_factors, dim_representation)-sized numpy array with votes.
"""
votes = np.zeros((ground_truth_data.num_factors, global_variances.shape[0]),
dtype=np.int64)
for _ in range(num_points):
factor_index, argmin = _generate_training_sample(ground_truth_data,
representation_function,
batch_size, random_state,
global_variances,
active_dims)
votes[factor_index, argmin] += 1
return votes
|
apache-2.0
| -9,107,700,491,507,206,000
| 42.415
| 80
| 0.657031
| false
| 4.412093
| false
| false
| false
|
uclouvain/OSIS-Louvain
|
base/tests/business/education_groups/test_delete.py
|
1
|
8462
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import TestCase
from base.business.education_groups import delete
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_types import GroupType
from base.models.group_element_year import GroupElementYear
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.authorized_relationship import AuthorizedRelationshipFactory
from base.tests.factories.education_group_year import TrainingFactory, GroupFactory
from base.tests.factories.group_element_year import GroupElementYearFactory
class TestHaveContents(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2019)
def test_have_contents_case_no_contents(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
self.assertFalse(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_no_contents_which_because_mandatory_structure(self):
"""
In this test, we ensure that all of its children are mandatory groups and that they are empty.
It must therefore be considered as empty.
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
for education_group_type in [GroupType.COMMON_CORE.name, GroupType.FINALITY_120_LIST_CHOICE.name]:
child = GroupFactory(academic_year=self.academic_year, education_group_type__name=education_group_type)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child.education_group_type,
min_count_authorized=1,
)
GroupElementYearFactory(parent=education_group_year, child_branch=child)
self.assertFalse(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_have_contents_because_mandatory_structure_is_present_multiple_times(self):
"""
In this test, we ensure that there are two elements of a type which is mandatory in the basic structure.
==> It must be considered as having contents
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
subgroup_1 = GroupFactory(academic_year=self.academic_year, education_group_type__name=GroupType.SUB_GROUP.name)
GroupElementYearFactory(parent=education_group_year, child_branch=subgroup_1)
subgroup_2 = GroupFactory(
academic_year=self.academic_year,
education_group_type=subgroup_1.education_group_type,
)
GroupElementYearFactory(parent=education_group_year, child_branch=subgroup_2)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=subgroup_1.education_group_type,
min_count_authorized=1,
)
self.assertTrue(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_contents_because_structure_have_child_which_are_not_mandatory(self):
"""
In this test, we ensure that at least one child is not a mandatory group, so the education group must not
be considered as empty
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(academic_year=self.academic_year)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1
)
GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
child_no_mandatory = GroupFactory(academic_year=self.academic_year)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=0
)
GroupElementYearFactory(parent=education_group_year, child_branch=child_no_mandatory)
self.assertTrue(delete._have_contents_which_are_not_mandatory(education_group_year))
class TestRunDelete(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2019)
def test_delete_case_no_mandatory_structure(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
def test_delete_case_remove_mandatory_structure(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(
academic_year=self.academic_year,
education_group_type__name=GroupType.COMMON_CORE.name
)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1,
)
link_parent_child = GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=child_mandatory.pk)
with self.assertRaises(GroupElementYear.DoesNotExist):
GroupElementYear.objects.get(pk=link_parent_child.pk)
def test_delete_case_remove_mandatory_structure_case_reused_item_which_are_mandatory(self):
"""
In this test, we ensure that the mandatory element is not removed if it is reused in another structure
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(
academic_year=self.academic_year,
education_group_type__name=GroupType.COMMON_CORE.name
)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1,
)
link_parent_child = GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
# Create another training
another_training = TrainingFactory(academic_year=self.academic_year)
GroupElementYearFactory(parent=another_training, child_branch=child_mandatory)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
with self.assertRaises(GroupElementYear.DoesNotExist):
GroupElementYear.objects.get(pk=link_parent_child.pk)
self.assertEqual(
child_mandatory,
EducationGroupYear.objects.get(pk=child_mandatory.pk)
)
|
agpl-3.0
| 1,811,763,372,489,224,200
| 47.348571
| 120
| 0.699326
| false
| 3.897282
| true
| false
| false
|
dipapaspyros/bdo_platform
|
aggregator/migrations/0015_auto_20180915_2057.py
|
2
|
1556
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-15 17:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('aggregator', '0014_auto_20180913_1531'),
]
operations = [
migrations.CreateModel(
name='DatasetAccess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateField()),
('end', models.DateField()),
('valid', models.BooleanField()),
],
),
migrations.RemoveField(
model_name='dataset',
name='dataset_user',
),
migrations.AddField(
model_name='datasetaccess',
name='dataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aggregator.Dataset'),
),
migrations.AddField(
model_name='datasetaccess',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='dataset',
name='access_list',
field=models.ManyToManyField(through='aggregator.DatasetAccess', to=settings.AUTH_USER_MODEL),
),
]
|
mit
| 1,340,202,843,143,090,000
| 32.826087
| 114
| 0.587404
| false
| 4.445714
| false
| false
| false
|
eirannejad/pyRevit
|
extensions/pyRevitTools.extension/pyRevit.tab/Modify.panel/edit3.stack/Groups.pulldown/Show Nested Group Structure.pushbutton/script.py
|
1
|
2660
|
# -*- coding: utf-8 -*-
"""List the nested group structure around the selected group or element."""
from pyrevit import revit, DB
from pyrevit import script
output = script.get_output()
selection = revit.get_selection()
class GroupNode:
def __init__(self, group_element, par=None):
self.group = group_element
self.subgroups = self.find_subgroups()
@property
def name(self):
return self.group.Name
@property
def id(self):
return self.group.Id
@property
def members(self):
return [revit.doc.GetElement(x) for x in self.group.GetMemberIds()]
def find_subgroups(self):
subgrps = []
for mem in self.members:
if isinstance(mem, DB.Group):
subgrps.append(GroupNode(mem))
return subgrps
def __len__(self):
return len(self.subgroups)
def __iter__(self):
return iter(self.subgroups)
def __repr__(self):
return '<{} name:{}>'.format(self.__class__.__name__, self.name)
def print_tree(groupnode, level, trunk='', branch=''):
"""recursive method for printing (nested) group structure"""
inset = '\t'
fruit = \
branch + '■ {name} {id}'\
.format(name=groupnode.name, id=output.linkify(groupnode.id))
if groupnode.id in selection.element_ids:
print(fruit + '\t<<< selected group element')
elif any([x in selection.element_ids
for x in [y.Id for y in groupnode.members
if not isinstance(y, DB.Group)]]):
print(fruit + '\t<<< selected group members')
else:
print(fruit)
count = len(groupnode)
for idx, sub_grp in enumerate(groupnode):
last = idx == count - 1
if last:
sub_grp_trunk = trunk + inset + ' '
sub_grp_branch = trunk + inset + '└──'
else:
sub_grp_trunk = trunk + inset + '│'
sub_grp_branch = trunk + inset + '├──'
print_tree(sub_grp, level + 1, sub_grp_trunk, sub_grp_branch)
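# Illustrative output shape of print_tree for a hypothetical parent group
# "Furniture" with subgroups "Desks" and "Chairs" (each nesting level is
# indented by a tab, and ids are rendered as clickable links by output.linkify):
#   ■ Furniture <id>
#       ├──■ Desks <id>
#       └──■ Chairs <id>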
# inspect the selection and find first parents
parent_groups = []
if not selection.is_empty:
for element in selection.elements:
if hasattr(element, 'GroupId'):
firstparent = element
while firstparent.GroupId != DB.ElementId.InvalidElementId:
firstparent = revit.doc.GetElement(firstparent.GroupId)
if isinstance(firstparent, DB.Group):
parent_groups.append(GroupNode(firstparent))
# print group structure for all discovered parent groups
for parent_grp in parent_groups:
print_tree(parent_grp, 0)
print('\n\n')
|
gpl-3.0
| 4,766,360,654,464,250,000
| 27.728261
| 78
| 0.595535
| false
| 3.722535
| false
| false
| false
|
imito/odin
|
odin/ml/scoring.py
|
1
|
10299
|
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.linalg import eigh, cholesky, inv, svd, solve
import tensorflow as tf
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from odin.backend import length_norm, calc_white_mat
from odin.ml.base import BaseEstimator, TransformerMixin, Evaluable
# ===========================================================================
# Cosine Scoring
# ===========================================================================
def compute_class_avg(X, y, classes, sorting=True):
""" compute average vector for each class
Parameters
----------
X: [nb_samples, feat_dim]
y: [nb_samples]
classes: [nb_classes]
assumed numerical classes
sorting: bool
if True, sort the `classes` by numerical order (from small to large)
Return
------
[nb_classes, feat_dim]
Note
----
The given order of each class in `classes` will determine
the row order of returned matrix
"""
if sorting:
classes = sorted(classes, reverse=False)
return np.concatenate([np.mean(X[y == i], axis=0, keepdims=True)
for i in classes],
axis=0)
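# Worked example (illustrative arrays):
#   X = np.array([[1., 1.], [3., 3.], [0., 2.]]); y = np.array([0, 0, 1])
#   compute_class_avg(X, y, classes=[0, 1]) -> [[2., 2.], [0., 2.]]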
def compute_within_cov(X, y, classes=None, class_avg=None):
""" Compute the within-classes covariance matrix
Parameters
----------
X : [nb_samples, feat_dim]
y : [nb_samples]
classes : [nb_classes]
assumed numerical classes
class_avg : [nb_classes, feat_dim]
concatenated average vector of each class
Return
------
[feat_dim, feat_dim]
Note
----
The given order of each class in `classes` will determine
the row order of returned matrix
"""
if classes is None and class_avg is None:
raise ValueError("`classes` and `class_avg` cannot be None together")
if classes is not None:
class_avg = compute_class_avg(X, y, classes, sorting=True)
X_mu = X - class_avg[y]
Sw = np.cov(X_mu.T)
return Sw
def compute_wccn(X, y, classes=None, class_avg=None):
""" Within class covariance normalization
Parameters
----------
X : [nb_samples, feat_dim]
y : [nb_samples]
classes : [nb_classes]
assumed numerical classes
class_avg : [nb_classes, feat_dim]
concatenated average vector of each class
Return
------
w: [feat_dim, feat_dim]
where X_norm = dot(X, w)
"""
if classes is None and class_avg is None:
raise ValueError("`classes` and `class_avg` cannot be None together")
Sw = compute_within_cov(X, y, classes, class_avg)
Sw = Sw + 1e-6 * np.eye(Sw.shape[0])
return calc_white_mat(Sw)
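# Conceptual sketch of WCCN: calc_white_mat is assumed here to return a whitening
# matrix w for the (regularised) within-class covariance Sw, i.e. roughly
# w.T @ Sw @ w ~ I, so that np.dot(X, w) has approximately identity
# within-class covariance.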
class VectorNormalizer(BaseEstimator, TransformerMixin):
""" Perform of sequence of normalization as following
-> Centering: Substract sample mean
-> Whitening: using within-class-covariance-normalization
-> Applying LDA (optional)
-> Length normalization
Parameters
----------
centering : bool (default: True)
mean normalized the vectors
wccn : bool (default: True)
within class covariance normalization
lda : bool (default: True)
Linear Discriminant Analysis
concat : bool (default: False)
concatenate original vector to the normalized vector
Return
------
[nb_samples, feat_dim] if `lda=False`
[nb_samples, nb_classes - 1] if `lda=True` and `concat=False`
[nb_samples, feat_dim + nb_classes - 1] if `lda=True` and `concat=True`
"""
def __init__(self, centering=True, wccn=False, unit_length=True,
lda=False, concat=False):
super(VectorNormalizer, self).__init__()
self._centering = bool(centering)
self._unit_length = bool(unit_length)
self._wccn = bool(wccn)
self._lda = LinearDiscriminantAnalysis() if bool(lda) else None
self._feat_dim = None
self._concat = bool(concat)
# ==================== properties ==================== #
@property
def feat_dim(self):
return self._feat_dim
@property
def is_initialized(self):
return self._feat_dim is not None
@property
def is_fitted(self):
return hasattr(self, '_W')
@property
def enroll_vecs(self):
return self._enroll_vecs
@property
def mean(self):
""" global mean vector """
return self._mean
@property
def vmin(self):
return self._vmin
@property
def vmax(self):
return self._vmax
@property
def W(self):
return self._W
@property
def lda(self):
return self._lda
# ==================== sklearn ==================== #
def _initialize(self, X, y):
if not self.is_initialized:
self._feat_dim = X.shape[1]
assert self._feat_dim == X.shape[1]
if isinstance(y, (tuple, list)):
y = np.asarray(y)
if y.ndim == 2:
y = np.argmax(y, axis=-1)
return y, np.unique(y)
def normalize(self, X, concat=None):
"""
Parameters
----------
X : array [nb_samples, feat_dim]
concat : {None, True, False}
if not None, override the default `concat` attribute of
this `VectorNormalizer`
"""
if not self.is_fitted:
raise RuntimeError("VectorNormalizer has not been fitted.")
if concat is None:
concat = self._concat
if concat:
X_org = X[:] if not isinstance(X, np.ndarray) else X
else:
X_org = None
# ====== normalizing ====== #
if self._centering:
X = X - self._mean
if self._wccn:
X = np.dot(X, self.W)
# ====== LDA ====== #
if self._lda is not None:
X_lda = self._lda.transform(X) # [nb_samples, nb_classes - 1]
# concat if necessary
if concat:
X = np.concatenate((X_lda, X_org), axis=-1)
else:
X = X_lda
# ====== unit length normalization ====== #
if self._unit_length:
X = length_norm(X, axis=-1, ord=2)
return X
def fit(self, X, y):
y, classes = self._initialize(X, y)
# ====== compute classes' average ====== #
enroll = compute_class_avg(X, y, classes, sorting=True)
M = X.mean(axis=0).reshape(1, -1)
self._mean = M
if self._centering:
X = X - M
# ====== WCCN ====== #
if self._wccn:
W = compute_wccn(X, y, classes=None, class_avg=enroll) # [feat_dim, feat_dim]
else:
W = 1
self._W = W
# ====== preprocess ====== #
# whitening the data
if self._wccn:
X = np.dot(X, W)
# length normalization
if self._unit_length:
X = length_norm(X, axis=-1)
# linear discriminant analysis
if self._lda is not None:
self._lda.fit(X, y) # [nb_classes, nb_classes - 1]
# ====== enroll vecs ====== #
self._enroll_vecs = self.normalize(enroll, concat=False)
# ====== max min ====== #
if self._lda is not None:
X = self._lda.transform(X)
X = length_norm(X, axis=-1, ord=2)
vmin = X.min(0, keepdims=True)
vmax = X.max(0, keepdims=True)
self._vmin, self._vmax = vmin, vmax
return self
def transform(self, X):
return self.normalize(X)
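# Minimal usage sketch for VectorNormalizer (X_train, y_train and X_test are
# hypothetical numpy arrays prepared elsewhere):
#   norm = VectorNormalizer(centering=True, wccn=True, lda=True)
#   norm.fit(X_train, y_train)
#   X_test_norm = norm.transform(X_test)  # [nb_samples, nb_classes - 1]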
class Scorer(BaseEstimator, TransformerMixin, Evaluable):
""" Scorer
Parameters
----------
centering : bool (default: True)
mean normalized the vectors
wccn : bool (default: True)
within class covariance normalization
lda : bool (default: True)
Linear Discriminant Analysis
concat : bool (default: False)
concatenate original vector to the normalized vector
method : {'cosine', 'svm'}
method for scoring
"""
def __init__(self, centering=True, wccn=True, lda=True, concat=False,
method='cosine', labels=None):
super(Scorer, self).__init__()
self._normalizer = VectorNormalizer(
centering=centering, wccn=wccn, lda=lda, concat=concat)
self._labels = labels
method = str(method).lower()
if method not in ('cosine', 'svm'):
raise ValueError('`method` must be one of the following: cosine, svm; '
'but given: "%s"' % method)
self._method = method
# ==================== properties ==================== #
@property
def method(self):
return self._method
@property
def feat_dim(self):
return self._normalizer.feat_dim
@property
def labels(self):
return self._labels
@property
def nb_classes(self):
return len(self._labels)
@property
def is_initialized(self):
return self._normalizer.is_initialized
@property
def is_fitted(self):
return self._normalizer.is_fitted
@property
def normalizer(self):
return self._normalizer
@property
def lda(self):
return self._normalizer.lda
# ==================== sklearn ==================== #
def fit(self, X, y):
# ====== preprocessing ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
if isinstance(y, (tuple, list)):
y = np.asarray(y)
# ====== vector normalizer ====== #
self._normalizer.fit(X, y)
if self._labels is None:
if y.ndim >= 2:
y = np.argmax(y, axis=-1)
self._labels = np.unique(y)
# ====== for SVM method ====== #
if self.method == 'svm':
X = self._normalizer.transform(X)
# scale to [-1, 1]
X = 2 * (X - self._normalizer.vmin) /\
(self._normalizer.vmax - self._normalizer.vmin) - 1
self._svm = SVC(C=1, kernel='rbf', gamma='auto', coef0=1,
shrinking=True, random_state=0,
probability=True, tol=1e-3,
cache_size=1e4, class_weight='balanced')
self._svm.fit(X, y)
self.predict_proba = self._predict_proba
return self
def _predict_proba(self, X):
if self.method != 'svm':
raise RuntimeError("`predict_proba` only for 'svm' method")
return self._svm.predict_proba(self._normalizer.transform(X))
def predict_log_proba(self, X):
return self.transform(X)
def transform(self, X):
# [nb_samples, nb_classes - 1] (if LDA applied)
X = self._normalizer.transform(X)
# ====== cosine scoring ====== #
if self.method == 'cosine':
# [nb_classes, nb_classes - 1]
model_ivectors = self._normalizer.enroll_vecs
test_ivectors = X
scores = np.dot(test_ivectors, model_ivectors.T)
# ====== svm ====== #
elif self.method == 'svm':
X = 2 * (X - self._normalizer.vmin) /\
(self._normalizer.vmax - self._normalizer.vmin) - 1
scores = self._svm.predict_log_proba(X)
return scores
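# Minimal usage sketch for Scorer (hypothetical i-vector arrays prepared elsewhere):
#   scorer = Scorer(method='cosine', lda=True)
#   scorer.fit(X_enroll, y_enroll)
#   scores = scorer.transform(X_test)  # [nb_test_samples, nb_classes]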
|
mit
| -2,396,844,634,403,191,000
| 27.216438
| 83
| 0.592873
| false
| 3.52223
| false
| false
| false
|
yephper/django
|
django/contrib/auth/views.py
|
1
|
12821
|
import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
def deprecate_current_app(func):
"""
Handle deprecation of the current_app parameter of the views.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
if 'current_app' in kwargs:
warnings.warn(
"Passing `current_app` as a keyword argument is deprecated. "
"Instead the caller of `{0}` should set "
"`request.current_app`.".format(func.__name__),
RemovedInDjango20Warning
)
current_app = kwargs.pop('current_app')
request = kwargs.get('request', None)
if request and current_app is not None:
request.current_app = current_app
return func(*args, **kwargs)
return inner
def _get_login_redirect_url(request, redirect_to):
# Ensure the user-originating redirection URL is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
return resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
@deprecate_current_app
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
extra_context=None, redirect_authenticated_user=False):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.POST.get(redirect_field_name, request.GET.get(redirect_field_name, ''))
if redirect_authenticated_user and request.user.is_authenticated():
redirect_to = _get_login_redirect_url(request, redirect_to)
if redirect_to == request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
elif request.method == "POST":
form = authentication_form(request, data=request.POST)
if form.is_valid():
auth_login(request, form.get_user())
return HttpResponseRedirect(_get_login_redirect_url(request, redirect_to))
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
@never_cache
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
extra_context=None):
"""
Logs out the user and displays 'You are logged out' message.
"""
auth_logout(request)
if next_page is not None:
next_page = resolve_url(next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
if (redirect_field_name in request.POST or
redirect_field_name in request.GET):
next_page = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name))
# Security check -- don't allow redirection to a different host.
if not is_safe_url(url=next_page, host=request.get_host()):
next_page = request.path
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
current_site = get_current_site(request)
context = {
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out')
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return logout(request, login_url, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
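# A typical URLconf wiring for these views (sketch only; django.contrib.auth.urls
# ships equivalent patterns):
#   url(r'^password_reset/$', password_reset, name='password_reset'),
#   url(r'^password_reset/done/$', password_reset_done, name='password_reset_done'),
#   url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
#       password_reset_confirm, name='password_reset_confirm'),
#   url(r'^reset/done/$', password_reset_complete, name='password_reset_complete'),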
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
UserModel = get_user_model()
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
extra_context=None):
if post_change_redirect is None:
post_change_redirect = reverse('password_change_done')
else:
post_change_redirect = resolve_url(post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
template_name='registration/password_change_done.html',
extra_context=None):
context = {
'title': _('Password change successful'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
|
bsd-3-clause
| -5,530,483,616,621,971,000
| 35.488304
| 97
| 0.625614
| false
| 4.210509
| false
| false
| false
|
rascul/botwot
|
plugins/urltitle.py
|
1
|
1618
|
""" Url Title Plugin (botwot plugins.urltitle) """
# Copyright 2014 Ray Schulz <https://rascul.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from bs4 import BeautifulSoup
import requests
from pyaib.plugins import observe, plugin_class
@plugin_class
class UrlTitle(object):
def __init__(self, context, config):
pass
@observe("IRC_MSG_PRIVMSG")
def observe_privmsg(self, context, msg):
""" Look up HTML titles for URLs """
m = re.match(r'(?P<url>https?://\S*)', msg.message)
if m:
# Grab the URL
url = m.groupdict().get("url")
# Make sure url has http:// or https://
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://%s" % url
# Get the page and parse it for title and meta description
try:
page = requests.get(url)
except (requests.exceptions.ConnectionError, requests.exceptions.InvalidURL):
return
if page and page.status_code < 400:
soup = BeautifulSoup(page.text)
if soup and soup.title:
title = soup.title.string
if title:
msg.reply("%s: %s" % (msg.sender, title[:256]))
|
apache-2.0
| -6,002,606,328,788,732,000
| 27.892857
| 80
| 0.687268
| false
| 3.472103
| false
| false
| false
|
pgroudas/pants
|
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py
|
1
|
16630
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import sys
from abc import abstractmethod
from collections import defaultdict
from pants.backend.core.tasks.group_task import GroupMember
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_global_strategy import JvmCompileGlobalStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_isolated_strategy import \
JvmCompileIsolatedStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.jvm_compile.jvm_fingerprint_strategy import JvmFingerprintStrategy
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.goal.products import MultipleRootedProducts
from pants.option.options import Options
from pants.reporting.reporting_utils import items_to_report_element
class JvmCompile(NailgunTaskBase, GroupMember):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
@classmethod
def register_options(cls, register):
super(JvmCompile, cls).register_options(register)
register('--partition-size-hint', type=int, default=sys.maxint, metavar='<# source files>',
help='Roughly how many source files to attempt to compile together. Set to a large '
'number to compile all sources together. Set to 0 to compile target-by-target.')
register('--jvm-options', type=Options.list,
help='Run the compiler with these JVM options.')
register('--args', action='append', default=list(cls.get_args_default(register.bootstrap)),
help='Pass these args to the compiler.')
register('--confs', type=Options.list, default=['default'],
help='Compile for these Ivy confs.')
# TODO: Stale analysis should be automatically ignored via Task identities:
# https://github.com/pantsbuild/pants/issues/1351
register('--clear-invalid-analysis', default=False, action='store_true',
advanced=True,
help='When set, any invalid/incompatible analysis files will be deleted '
'automatically. When unset, an error is raised instead.')
register('--warnings', default=True, action='store_true',
help='Compile with all configured warnings enabled.')
register('--warning-args', action='append', default=list(cls.get_warning_args_default()),
advanced=True,
help='Extra compiler args to use when warnings are enabled.')
register('--no-warning-args', action='append', default=list(cls.get_no_warning_args_default()),
advanced=True,
help='Extra compiler args to use when warnings are disabled.')
register('--strategy', choices=['global', 'isolated'], default='global',
help='Selects the compilation strategy to use. The "global" strategy uses a shared '
'global classpath for all compiled classes, and the "isolated" strategy uses '
'per-target classpaths.')
JvmCompileGlobalStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
JvmCompileIsolatedStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
@classmethod
def product_types(cls):
return ['classes_by_target', 'classes_by_source', 'resources_by_target']
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
# This task uses JvmDependencyAnalyzer as a helper, get its product needs
JvmDependencyAnalyzer.prepare(options, round_manager)
round_manager.require_data('compile_classpath')
round_manager.require_data('ivy_resolve_symlink_map')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce automating wiring of the
# produce side
round_manager.require_data('java')
round_manager.require_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.require_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_language = None
_file_suffix = None
_supports_concurrent_execution = None
@classmethod
def name(cls):
return cls._language
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
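# Sketch of a subclass override (values are hypothetical, not taken from any
# concrete compiler task):
#   @classmethod
#   def get_args_default(cls, bootstrap_option_values):
#     return ('-encoding', 'UTF-8')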
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@property
def config_section(self):
return self.options_scope
def select(self, target):
return target.has_sources(self._file_suffix)
def create_analysis_tools(self):
"""Returns an AnalysisTools implementation.
Subclasses must implement.
"""
raise NotImplementedError()
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file):
"""Invoke the compiler.
Must raise TaskError on compile failure.
Subclasses must implement."""
raise NotImplementedError()
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def extra_products(self, target):
"""Any extra, out-of-band resources created for a target.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the compile_classpath, and
made available in resources_by_target.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
return []
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
self.setup_artifact_cache()
# The ivy confs for which we're building.
self._confs = self.get_options().confs
# The compile strategy to use for analysis and classfile placement.
if self.get_options().strategy == 'global':
strategy_constructor = JvmCompileGlobalStrategy
else:
assert self.get_options().strategy == 'isolated'
strategy_constructor = JvmCompileIsolatedStrategy
self._strategy = strategy_constructor(self.context,
self.get_options(),
self.workdir,
self.create_analysis_tools(),
self._language,
lambda s: s.endswith(self._file_suffix))
def _jvm_fingerprint_strategy(self):
# Use a fingerprint strategy that allows us to also include java/scala versions.
return JvmFingerprintStrategy(self._platform_version_info())
def _platform_version_info(self):
return [self._strategy.name()] + self._language_platform_version_info()
@abstractmethod
def _language_platform_version_info(self):
"""
Provides extra platform information such as java version that will be used
in the fingerprinter. This in turn ensures different platform versions create different
cache artifacts.
Subclasses must override this and return a list of version info.
"""
pass
def pre_execute(self):
# Only create these working dirs during execution phase, otherwise, they
# would be wiped out by clean-all goal/task if it's specified.
self._strategy.pre_compile()
# TODO(John Sirois): Ensuring requested product maps are available - if empty - should probably
# be lifted to Task infra.
# In case we have no relevant targets and return early create the requested product maps.
self._create_empty_products()
def prepare_execute(self, chunks):
targets_in_chunks = list(itertools.chain(*chunks))
# Invoke the strategy's prepare_compile to prune analysis.
cache_manager = self.create_cache_manager(invalidate_dependents=True,
fingerprint_strategy=self._jvm_fingerprint_strategy())
self._strategy.prepare_compile(cache_manager, self.context.targets(), targets_in_chunks)
def execute_chunk(self, relevant_targets):
if not relevant_targets:
return
# Invalidation check. Everything inside the with block must succeed for the
# invalid targets to become valid.
partition_size_hint, locally_changed_targets = self._strategy.invalidation_hints(relevant_targets)
with self.invalidated(relevant_targets,
invalidate_dependents=True,
partition_size_hint=partition_size_hint,
locally_changed_targets=locally_changed_targets,
fingerprint_strategy=self._jvm_fingerprint_strategy(),
topological_order=True) as invalidation_check:
if invalidation_check.invalid_vts:
# Find the invalid targets for this chunk.
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
# Register products for all the valid targets.
# We register as we go, so dependency checking code can use this data.
valid_targets = list(set(relevant_targets) - set(invalid_targets))
valid_compile_contexts = [self._strategy.compile_context(t) for t in valid_targets]
self._register_vts(valid_compile_contexts)
# Invoke the strategy to execute compilations for invalid targets.
update_artifact_cache_vts_work = (self.get_update_artifact_cache_work
if self.artifact_cache_writes_enabled() else None)
self._strategy.compile_chunk(invalidation_check,
self.context.targets(),
relevant_targets,
invalid_targets,
self.extra_compile_time_classpath_elements(),
self._compile_vts,
self._register_vts,
update_artifact_cache_vts_work)
else:
# Nothing to build. Register products for all the targets in one go.
self._register_vts([self._strategy.compile_context(t) for t in relevant_targets])
def _compile_vts(self, vts, sources, analysis_file, upstream_analysis, classpath, outdir, progress_message):
"""Compiles sources for the given vts into the given output dir.
vts - versioned target set
sources - sources for this target set
analysis_file - the analysis file to manipulate
classpath - a list of classpath entries
outdir - the output dir to send classes to
May be invoked concurrently on independent target sets.
Postcondition: The individual targets in vts are up-to-date, as if each were
compiled individually.
"""
if not sources:
self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
.format(self.name(), vts.targets))
else:
# Do some reporting.
self.context.log.info(
'Compiling ',
items_to_report_element(sources, '{} source'.format(self.name())),
' in ',
items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
' (',
progress_message,
').')
with self.context.new_workunit('compile'):
# The compiler may delete classfiles, then later exit on a compilation error. Then if the
# change triggering the error is reverted, we won't rebuild to restore the missing
# classfiles. So we force-invalidate here, to be on the safe side.
vts.force_invalidate()
self.compile(self._args, classpath, sources, outdir, upstream_analysis, analysis_file)
def check_artifact_cache(self, vts):
post_process_cached_vts = lambda vts: self._strategy.post_process_cached_vts(vts)
return self.do_check_artifact_cache(vts, post_process_cached_vts=post_process_cached_vts)
def _create_empty_products(self):
make_products = lambda: defaultdict(MultipleRootedProducts)
if self.context.products.is_required_data('classes_by_source'):
self.context.products.safe_create_data('classes_by_source', make_products)
# Whether or not anything else requires resources_by_target, this task
# uses it internally.
self.context.products.safe_create_data('resources_by_target', make_products)
# JvmDependencyAnalyzer uses classes_by_target within this run
self.context.products.safe_create_data('classes_by_target', make_products)
def _register_vts(self, compile_contexts):
classes_by_source = self.context.products.get_data('classes_by_source')
classes_by_target = self.context.products.get_data('classes_by_target')
compile_classpath = self.context.products.get_data('compile_classpath')
resources_by_target = self.context.products.get_data('resources_by_target')
# Register class products.
if classes_by_source is not None or classes_by_target is not None:
computed_classes_by_source_by_context = self._strategy.compute_classes_by_source(
compile_contexts)
resource_mapping = self._strategy.compute_resource_mapping(compile_contexts)
for compile_context in compile_contexts:
computed_classes_by_source = computed_classes_by_source_by_context[compile_context]
target = compile_context.target
classes_dir = compile_context.classes_dir
target_products = classes_by_target[target] if classes_by_target is not None else None
for source in compile_context.sources: # Sources are relative to buildroot.
classes = computed_classes_by_source.get(source, []) # Classes are absolute paths.
for cls in classes:
clsname = self._strategy.class_name_for_class_file(compile_context, cls)
resources = resource_mapping.get(clsname, [])
resources_by_target[target].add_abs_paths(classes_dir, resources)
if classes_by_target is not None:
target_products.add_abs_paths(classes_dir, classes)
if classes_by_source is not None:
classes_by_source[source].add_abs_paths(classes_dir, classes)
# Register resource products.
for compile_context in compile_contexts:
extra_resources = self.extra_products(compile_context.target)
# Add to resources_by_target (if it was requested).
if resources_by_target is not None:
target_resources = resources_by_target[compile_context.target]
for root, abs_paths in extra_resources:
target_resources.add_abs_paths(root, abs_paths)
# And to the compile_classpath, to make them available within the next round.
# TODO(stuhood): This is redundant with resources_by_target, but resources_by_target
# are not available during compilation. https://github.com/pantsbuild/pants/issues/206
entries = [(conf, root) for conf in self._confs for root, _ in extra_resources]
compile_classpath.add_for_target(compile_context.target, entries)
|
apache-2.0
| 7,279,666,870,760,293,000
| 43.945946
| 110
| 0.676729
| false
| 4.300491
| false
| false
| false
|
azumimuo/family-xbmc-addon
|
plugin.video.showboxarize/resources/lib/sources_de/1kino.py
|
1
|
4950
|
# -*- coding: utf-8 -*-
"""
Flixnet Add-on
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['1kino.in', 'streamkiste.tv']
self.base_link = 'http://1kino.in'
self.search_link = '/include/live.php?keyword=%s&nonce=%s'
self.search_js = '/js/live-search.js'
def movie(self, imdb, title, localtitle, year):
try:
url = self.__search(title, year)
if not url and title != localtitle: url = self.__search(localtitle, year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
pid = re.findall('[e|t]\s*=\s*"(\w+)"\s*,', r)[0]
r = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream-container'})[0].content
r = re.compile('<div id="stream-h">.*?</li>.*?</div>\s*</div>', re.IGNORECASE | re.DOTALL).findall(r)
r = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'mirror-head'}), dom_parser.parse_dom(i, 'div', attrs={'id': 'stream-links'})) for i in r]
r = [(i[0][0].content, i[1]) for i in r if i[0]]
r = [(re.findall('.+\s[\||-]\s(.*)', i[0]), i[1]) for i in r]
r = [(i[0][0].strip(), i[1]) for i in r if len(i[0]) > 0]
for name, links in r:
quality, info = source_utils.get_release_quality(name)
links = [dom_parser.parse_dom(i.content, 'a', req=['href', 'title', 'data-mirror', 'data-host']) for i in links]
links = [([i[0].attrs.get('data-mirror'), i[0].attrs.get('data-host'), pid, url], i[0].content) for i in links]
info = ' | '.join(info)
for link, hoster in links:
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
link = urllib.urlencode({'mirror': link[0], 'host': link[1], 'pid': link[2], 'ceck': 'sk'})
sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
try:
r = client.request(urlparse.urljoin(self.base_link, '/include/load.php'), post=url, XHR=True)
r = r.replace('\r', '').replace('\n', '')
links = [i.attrs['href'] for i in dom_parser.parse_dom(r, 'a', req='href') if i]
ifrms = [i.attrs['src'].strip() for i in dom_parser.parse_dom(r, 'iframe', req='src') if i]
links += ifrms
for link in links:
if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
if self.base_link in link:
link = client.request(link, output='geturl')
if self.base_link not in link:
return link
except:
return
def __search(self, title, year):
try:
n = client.request(urlparse.urljoin(self.base_link, self.search_js))
try: n = re.findall('nonce=([0-9a-zA-Z]+)', n)[0]
except: n = '273e0f8ea3'
query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), n)
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(title)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = json.loads(r)
r = [(r[i].get('url'), r[i].get('title'), r[i].get('extra').get('date')) for i in r]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
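# Rough usage sketch for the scraper class above (normally these methods are
# driven by the Flixnet framework rather than called directly). The IMDb id,
# titles, year and host list below are placeholder values for illustration.
if __name__ == '__main__':
    s = source()
    url = s.movie('tt0000000', 'Example Title', 'Beispiel Titel', '2016')
    if url:
        for src in s.sources(url, hostDict=['openload.co'], hostprDict=[]):
            # each source carries an urlencoded payload that resolve() posts back
            resolved = s.resolve(src['url'])
            print('%s %s -> %s' % (src['source'], src['quality'], resolved))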
|
gpl-2.0
| -1,739,964,767,721,680,600
| 37.679688
| 179
| 0.552525
| false
| 3.568854
| false
| false
| false
|
BlackVegetable/starcraft-oracle
|
sc2reader-master/sc2reader/constants.py
|
1
|
3068
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
# These are found in Repack-MPQ/fileset.{locale}#Mods#Core.SC2Mod#{locale}.SC2Data/LocalizedData/Editor/EditorCategoryStrings.txt
# EDSTR_CATEGORY_Race
# EDSTR_PLAYERPROPS_RACE
# The ??? means that I don't know what language it is.
# If multiple languages use the same set they should be comma separated
LOCALIZED_RACES = {
# enUS
'Terran': 'Terran',
'Protoss': 'Protoss',
'Zerg': 'Zerg',
# ruRU
'Терран': 'Terran',
'Протосс': 'Protoss',
'Зерг': 'Zerg',
# koKR
'테란': 'Terran',
'프로토스': 'Protoss',
'저그': 'Zerg',
# ??eu
'Terranie': 'Terran',
'Protosi': 'Protoss',
'Zergi': 'Zerg',
    # zhCN
'人类': 'Terran',
'星灵': 'Protoss',
'异虫': 'Zerg',
# zhTW
'人類': 'Terran',
'神族': 'Protoss',
'蟲族': 'Zerg',
# ???
'Terrano': 'Terran',
# deDE
'Terraner': 'Terran',
# esES - Spanish
# esMX - Latin American
# frFR - French - France
# plPL - Polish Polish
# ptBR - Brazilian Portuguese
}
MESSAGE_CODES = {
'0': 'All',
'2': 'Allies',
'128': 'Header',
'125': 'Ping',
}
GAME_SPEED_FACTOR = {
'Slower': 0.6,
'Slow': 0.8,
'Normal': 1.0,
'Fast': 1.2,
'Faster': 1.4
}
GATEWAY_CODES = {
'US': 'Americas',
'KR': 'Asia',
'EU': 'Europe',
'SG': 'South East Asia',
'XX': 'Public Test',
}
GATEWAY_LOOKUP = {
0: '',
1: 'us',
2: 'eu',
3: 'kr',
5: 'cn',
6: 'sea',
98: 'xx',
}
COLOR_CODES = {
'B4141E': 'Red',
'0042FF': 'Blue',
'1CA7EA': 'Teal',
'EBE129': 'Yellow',
'540081': 'Purple',
'FE8A0E': 'Orange',
'168000': 'Green',
'CCA6FC': 'Light Pink',
'1F01C9': 'Violet',
'525494': 'Light Grey',
'106246': 'Dark Green',
'4E2A04': 'Brown',
'96FF91': 'Light Green',
'232323': 'Dark Grey',
'E55BB0': 'Pink',
'FFFFFF': 'White',
'000000': 'Black',
}
COLOR_CODES_INV = dict(zip(COLOR_CODES.values(), COLOR_CODES.keys()))
REGIONS = {
# United States
'us': {
1: 'us',
2: 'la',
},
# Europe
'eu': {
1: 'eu',
2: 'ru',
},
# Korea - appear to both map to same place
'kr': {
1: 'kr',
2: 'tw',
},
# Taiwan - appear to both map to same place
'tw': {
1: 'kr',
2: 'tw',
},
# China - different url scheme (www.battlenet.com.cn)?
'cn': {
1: 'cn',
},
# South East Asia
'sea': {
1: 'sea',
},
# Singapore
'sg': {
1: 'sg',
},
# Public Test
'xx': {
1: 'xx',
},
}
import json
import pkgutil
attributes_json = pkgutil.get_data('sc2reader.data', 'attributes.json').decode('utf8')
attributes_dict = json.loads(attributes_json)
LOBBY_PROPERTIES = dict()
for key, value in attributes_dict.get('attributes', dict()).items():
LOBBY_PROPERTIES[int(key)] = value
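# Small usage sketch of the lookup tables defined above; the expected values in
# the comments are taken directly from the dictionaries in this module.
if __name__ == '__main__':
    print(GAME_SPEED_FACTOR['Faster'])   # 1.4
    print(COLOR_CODES['B4141E'])         # 'Red'
    print(COLOR_CODES_INV['Red'])        # 'B4141E'
    print(GATEWAY_CODES['EU'])           # 'Europe'
    print(LOCALIZED_RACES['Терран'])     # 'Terran'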
|
mit
| 1,542,272,283,265,632,300
| 17.359756
| 129
| 0.50714
| false
| 2.615986
| false
| false
| false
|
kundajelab/simdna
|
simdna/simdnautil/pwm.py
|
1
|
5397
|
from __future__ import absolute_import, division, print_function
import numpy as np
from simdna.simdnautil.util import DEFAULT_LETTER_TO_INDEX
from simdna.simdnautil import util
import math
class PWM(object):
"""
Object representing a position weight matrix;
allows sampling from the PWM either randomly or taking the best hit.
:param name: name of the PWM
:param letterToIndex: dictionary mapping from letter to index. Defaults
to ACGT ordering.
:param probMatrix: rows of the PWM (in probability space). Can be added
later too by calling addRows.
:param pseudocountProb: smoothing factor to add to probMatrix. Specify
this in the constructor if you are also specifying probMatrix in
the constructor.
"""
def __init__(self, name, letterToIndex=DEFAULT_LETTER_TO_INDEX,
probMatrix=None, pseudocountProb=None):
self.name = name
self.letterToIndex = letterToIndex
self.indexToLetter = dict(
(self.letterToIndex[x], x) for x in self.letterToIndex)
self._rows = []
self._finalised = False
if (probMatrix is not None):
self.addRows(matrix=probMatrix)
if (pseudocountProb is not None):
            assert probMatrix is not None, (
                "please specify probMatrix in the constructor if you are "
                "going to specify pseudocountProb in the constructor")
self.finalise(pseudocountProb=pseudocountProb)
def add_row(self, weights):
self.addRow(weights)
"""
Add row to the end of the PWM. Must be specified in probability
space.
:param weights: a row of the PWM (in probability space)
"""
def addRow(self, weights):
if (len(self._rows) > 0):
assert len(weights) == len(self._rows[0])
self._rows.append(weights)
"""
See addRows
"""
def add_rows(self, matrix):
        return self.addRows(matrix)
"""
Add rows of 'matrix' to the end of the PWM. Must be specified in probability
space.
:param matrix: rows of the PWM (in probability space)
:return: self
"""
def addRows(self, matrix):
for row in matrix:
self.addRow(weights=row)
return self
def finalize(self, pseudocountProb=0.001):
        return self.finalise(pseudocountProb=pseudocountProb)
def finalise(self, pseudocountProb=0.001):
"""
Function run after loading the weight matrix to smooth
the PWM after loading is complete
:param pseudocountProb: smoothing factor
:return:
"""
assert pseudocountProb >= 0 and pseudocountProb < 1
# will smoothen the rows with a pseudocount...
self._rows = np.array(self._rows)
self._rows = self._rows * \
(1 - pseudocountProb) + float(pseudocountProb) / len(self._rows[0])
for row in self._rows:
assert(abs(sum(row) - 1.0) < 0.0001)
self._logRows = np.log(self._rows)
self._finalised = True
self.bestPwmHit = self.computeBestHitGivenMatrix(self._rows)
self.pwmSize = len(self._rows)
return self
def get_best_hit(self):
return self.bestPwmHit
def getBestHit(self):
return self.bestPwmHit
def compute_best_hit_given_matrix(self, matrix):
"""
Compute the highest probability instance of the PWM
        :param matrix: the matrix to use to compute the PWM
:return: the string best hit
"""
return "".join(self.indexToLetter[x] for x in (np.argmax(matrix, axis=1)))
def computeBestHitGivenMatrix(self, matrix):
"""
Compute the highest probability instance of the PWM
        :param matrix: the matrix to use to compute the PWM
:return: the string best hit
"""
return "".join(self.indexToLetter[x] for x in (np.argmax(matrix, axis=1)))
def get_rows(self):
return self.getRows()
def getRows(self):
if (not self._finalised):
raise RuntimeError("Please call finalise on " + str(self.name))
return self._rows
def sample_from_pwm(self, bg=None):
        return self.sampleFromPwm(bg=bg)
def sampleFromPwm(self, bg=None):
"""
Randomly sample according to the PWM; if a background is included
then compute the logodds relative to that background and return.
:param bg: background frequency to compute relative to
:return: sample or (sample and logodds) if bg is not None
"""
if (not self._finalised):
raise RuntimeError("Please call finalise on " + str(self.name))
sampledLetters = []
logOdds = 0
for row in self._rows:
sampledIndex = util.sampleFromProbsArr(row)
letter = self.indexToLetter[sampledIndex]
if (bg is not None):
logOdds += np.log(row[sampledIndex]) - np.log(bg[letter])
sampledLetters.append(letter)
sampledHit = "".join(sampledLetters)
if (bg is not None):
return (sampledHit, logOdds)
else:
return sampledHit
def sample_from_pwm_and_score(self, bg):
return self.sampleFromPwm(bg=bg)
def sampleFromPwmAndScore(self, bg):
return self.sampleFromPwm(bg=bg)
def __str__(self):
return self.name + "\n" + str(self._rows)
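# Minimal usage sketch for the PWM class above. The probability rows are
# made-up illustrative values (not a real motif) and the expected best hit
# assumes the default ACGT letter ordering.
if __name__ == "__main__":
    pwm = PWM("example_motif",
              probMatrix=[[0.7, 0.1, 0.1, 0.1],
                          [0.1, 0.7, 0.1, 0.1],
                          [0.1, 0.1, 0.1, 0.7]],
              pseudocountProb=0.001)
    print(pwm.getBestHit())      # -> "ACT" with the default ordering
    print(pwm.sampleFromPwm())   # one random string sampled from the PWM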
|
mit
| -9,102,593,418,064,766,000
| 33.158228
| 82
| 0.621642
| false
| 3.971302
| false
| false
| false
|
MichalBusta/FASText
|
tools/icdarUtils.py
|
1
|
41513
|
'''
Created on Jan 7, 2015
@author: busta
'''
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import utils
from ft import FASTex
import pylab
import pandas
def draw_missed_letters(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 12, inter = True, scalingFactor=0.5):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
        if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
    missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
border = 25
for letter in np.asarray(keys)[np.asarray(indices)]:
if not missing_letters.has_key(letter):
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
gt0 = miss[1]
gt = [gt0[0] - border, gt0[1] - border, gt0[2] + border, gt0[3] + border ]
gt[0] = max(0, gt[0])
gt[1] = max(0, gt[1])
if color == 1:
img = cv2.imread(miss[0])
else:
img = cv2.imread(miss[0], 0)
gt[2] = min(img.shape[1], gt[2])
gt[3] = min(img.shape[0], gt[3])
baseName = os.path.basename(miss[0])
baseName = baseName[:-4]
            segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
scales = ft.getImageScales()
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
octaves = np.unique( keypointsInside[:, 2])
maxOctave = 0
if octaves.shape[0] > 0:
maxOctave = np.max(octaves)
mask = (keypoints[:, 0] > gt[0]) * (keypoints[:, 0] < gt[2]) * (keypoints[:, 1] > gt[1]) * (keypoints[:, 1] < gt[3])
images = []
octPoints = []
octScales = []
keypointsInRect = keypoints[mask, :]
for i in range(int(maxOctave) + 1):
scale = scales[i]
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
octavePoints = keypointsInRect[keypointsInRect[:, 2] == i, :].copy()
if octavePoints.shape[0] > 0:
dst = ft.getImageAtScale(i)
images.append(dst)
octavePoints[:, 0] *= scales[i]
octavePoints[:, 1] *= scales[i]
octavePoints[:, 5] *= scales[i]
octavePoints[:, 6] *= scales[i]
octavePoints[:, 7] *= scales[i]
octavePoints[:, 8] *= scales[i]
octPoints.append(octavePoints)
octScales.append(scale)
f, axes = plt.subplots(1, 1 + len(images), figsize=(16, 3))
if len(images) > 0:
ax = axes[0]
else:
ax = axes
if color == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
zoom = img[gt[1]:gt[3], gt[0]:gt[2]]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
kpMask = keypoints[mask]
kpMask[:, 0] = kpMask[:, 0] - gt[0]
kpMask[:, 1] = kpMask[:, 1] - gt[1]
kpMask[:, 7] = kpMask[:, 7] - gt[0]
kpMask[:, 8] = kpMask[:, 8] - gt[1]
ax.plot(kpMask[:, 0], kpMask[:, 1], 'ro')
for k in range(kpMask.shape[0]):
ax.plot([kpMask[k,0], kpMask[k,7]], [kpMask[k,1], kpMask[k,8]], 'r-')
style = 'rx'
if kpMask.shape[1] > 9:
for k in range(3):
maski = kpMask[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "rs"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot([kpMask[maski,7]], [kpMask[maski,8]], style)
mask = (keypointsInside[:, 0] > gt[0]) * (keypointsInside[:, 0] < gt[2]) * (keypointsInside[:, 1] > gt[1]) * (keypointsInside[:, 1] < gt[3])
kpMask = keypointsInside[mask]
keypointsInside[:, 0] = keypointsInside[:, 0] - gt[0]
keypointsInside[:, 1] = keypointsInside[:, 1] - gt[1]
keypointsInside[:, 7] = keypointsInside[:, 7] - gt[0]
keypointsInside[:, 8] = keypointsInside[:, 8] - gt[1]
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
for k in range(keypointsInside.shape[0]):
ax.plot([keypointsInside[k,0], keypointsInside[k,7]], [keypointsInside[k,1], keypointsInside[k,8]], 'g-')
ax.set_xlim(0, gt[2] - max(0, gt[0]))
ax.set_ylim((gt[3] - max(0, gt[1]), 0))
line = mlines.Line2D(np.array([gt0[0] - gt[0], gt0[2] - gt[0], gt0[2] - gt[0], gt0[0] - gt[0], gt0[0] - gt[0]]), np.array([gt0[1] - gt[1], gt0[1] - gt[1], gt0[3] - gt[1], gt0[3] - gt[1], gt0[1] - gt[1]]), lw=5., alpha=0.4, color='b')
ax.add_line(line)
f.suptitle('Missing letter: {0} ({1})'.format(gt0[4], miss[0]))
for ai in range(len(images)):
ax = axes[ai + 1]
scale = octScales[ai]
gts = (gt[0] * scale, gt[1] * scale, gt[2] * scale, gt[3] * scale)
ax.plot(octPoints[ai][:, 0] - gts[0], octPoints[ai][:, 1] - gts[1], 'ro')
zoom = images[ai][int(gt[1] * scale):int(gt[3] * scale), int(gt[0] * scale):int(gt[2] * scale)]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
ax.set_xlim(0, gts[2] - max(0, gts[0]))
ax.set_ylim((gts[3] - max(0, gts[1]), 0))
plt.show()
def draw_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 12, inter = True, scalingFactor=0.5):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
        missing_segm = vars_dict['missing_segm']
missing_segm = dict(missing_segm.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
for image in missing_segm.keys():
arr = missing_segm[image]
if color == 1:
img = cv2.imread(image)
else:
img = cv2.imread(image, 0)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
baseName = os.path.basename(image)
baseName = baseName[:-4]
segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
f = plt.figure(num = 110)
ax = f.add_subplot(111)
ax.imshow(img, cmap=pylab.gray(), interpolation='nearest')
style = "rx"
for k in range(6):
maski = keypoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot(keypoints[maski, 0], keypoints[maski, 1], style)
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
ax.set_xlim(0, img.shape[1])
ax.set_ylim(img.shape[0], 0)
for i in range(len(arr)):
miss_gt = arr[i]
line = mlines.Line2D(np.array([miss_gt[0], miss_gt[2], miss_gt[2], miss_gt[0], miss_gt[0]]), np.array([miss_gt[1], miss_gt[1], miss_gt[3], miss_gt[3], miss_gt[1]]), lw=5., alpha=0.4, color='b')
ax.add_line(line)
ax.set_title('Missing segmentation: {0}'.format(image))
plt.show()
def plot_keypoints_histograms(vars_dict):
f, ax = plt.subplots(2, sharex=True)
hist = vars_dict['hist']
ax[0].plot(hist)
ax[0].set_title('FAST Keypoints Histogram')
plt.xlabel('Intensity')
plt.ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
hist = vars_dict['histFp']
ax[1].plot(hist)
ax[1].set_title('FAST Keypoints Histogram - False Positives')
ax[1].set_xlim([0, 255])
f, ax = plt.subplots(2, sharex=True)
histDist = vars_dict['histDist']
ax[0].plot(histDist)
ax[0].set_title('FAST Keypoints Scores')
plt.xlabel('Score')
plt.ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
histDistFp = vars_dict['histDistFp']
ax[1].plot(histDistFp)
ax[1].set_title('FAST Keypoints Scores')
ax[1].set_xlim([0, 255])
f, ax = plt.subplots(4, sharex=True)
histDist = vars_dict['histDistMax']
ax[0].plot(histDist)
ax[0].set_title('Keypoints on Letter')
plt.xlabel('Distance')
ax[0].set_ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
histDistFp = vars_dict['histDistMaxFp']
ax[1].plot(histDistFp)
ax[1].set_title('Keypoints Outside Letter')
ax[1].set_ylabel('Keypoints Count')
ax[1].set_xlim([0, 255])
histDistMaxWhite = vars_dict['histDistMaxWhite']
ax[2].plot(histDistMaxWhite)
ax[2].set_title('Black Ink Keypoints')
ax[2].set_ylabel('Keypoints Count')
ax[2].set_xlim([0, 255])
histDistMaxWhiteFp = vars_dict['histDistMaxWhiteFp']
ax[3].plot(histDistMaxWhiteFp)
ax[3].set_title('Black Ink Keypoints - Outside')
ax[3].set_ylabel('Keypoints Count')
ax[3].set_xlim([0, 255])
hist2dDist = vars_dict['hist2dDist']
hist2dDistFp = vars_dict['hist2dDistFp']
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(17, 8))
ax[0].set_xlabel('Intensity')
ax[0].set_ylabel('Max Distance')
ax[0].set_xlim([0, 255])
ax[0].set_ylim([0, 255])
imgplot = ax[0].imshow(hist2dDist, interpolation='nearest', origin='low')
    ax[0].set_title('Keypoints Inside')
imgplot.set_cmap('hot')
    ax[1].set_title('Keypoints Outside')
ax[1].set_xlabel('Intensity')
ax[1].set_ylabel('Max Distance')
imgplot = ax[1].imshow(hist2dDistFp, interpolation='nearest', origin='low')
imgplot.set_cmap('hot')
ax[1].set_xlim([0, 255])
ax[1].set_ylim([0, 255])
hist2dDist = vars_dict['hist2dDistScore']
hist2dDistFp = vars_dict['hist2dDistScoreFp']
fig, ax = plt.subplots()
ax.set_xlabel('Score')
ax.set_ylabel('DistMax')
imgplot = ax.imshow(hist2dDist, interpolation='nearest', origin='low')
    ax.set_title('Keypoints Inside')
imgplot.set_cmap('hot')
fig, ax = plt.subplots()
    ax.set_title('Keypoints Outside')
ax.set_xlabel('Score')
ax.set_ylabel('DistMax')
imgplot = ax.imshow(hist2dDistFp, interpolation='nearest', origin='low')
imgplot.set_cmap('hot')
def collect_histograms(img, segmImg, keypoints, values, diffValMax, keypointsTotalInside, diffMaxOctavesMap, diffScoreOctavesMap, hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp):
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = (centers == (255, 255, 255))
keypointsInsideMask = np.invert( np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]) )
keypointsTotalInside += np.count_nonzero(keypointsInsideMask)
centers2 = segmImg[keypoints[:, 8].astype(int), keypoints[:, 7].astype(int)]
keypointsInsideMask2 = (centers2 == (255, 255, 255))
keypointsInsideMask2 = np.invert( np.bitwise_and(np.bitwise_and(keypointsInsideMask2[:, 0], keypointsInsideMask2[:, 1]), keypointsInsideMask2[:, 2]) )
keypointsInsideMask = np.bitwise_or(keypointsInsideMask, keypointsInsideMask2)
keypointsInside = keypoints[keypointsInsideMask, :]
maskBlackInk = img[keypoints[:, 8].astype(int), keypoints[:, 7].astype(int)] <= img[keypoints[:, 6].astype(int), keypoints[:, 5].astype(int)]
maskWhiteInk = np.invert(maskBlackInk)
octaves = np.unique( keypointsInside[:, 2])
if len(octaves) > 0:
maxOctave = np.max(octaves)
difMaxInside = diffValMax[keypointsInsideMask]
for i in range(int(maxOctave) + 1):
difMaxInsideOctave = difMaxInside[keypointsInside[:, 2] == i]
keypointsOctaveScore = keypointsInside[keypointsInside[:, 2] == i, 3]
if difMaxInsideOctave.shape[0] > 0:
if diffMaxOctavesMap.has_key(i):
diffMaxOctavesMap[i] = np.hstack( (diffMaxOctavesMap[i], np.copy(difMaxInsideOctave)))
diffScoreOctavesMap[i] = np.hstack( (diffScoreOctavesMap[i], np.copy(keypointsOctaveScore) ) )
else:
diffMaxOctavesMap[i] = np.copy(difMaxInsideOctave)
diffScoreOctavesMap[i] = np.copy(keypointsOctaveScore)
bins = np.arange(255)
if hist is None:
hist = np.histogram(values[keypointsInsideMask], bins=bins)[0]
histDist = np.histogram(keypointsInside[:, 3], bins=bins)[0]
histDistMax = np.histogram(diffValMax[keypointsInsideMask], bins=bins)[0]
histDistMaxWhite = np.histogram(diffValMax[np.bitwise_and(keypointsInsideMask, maskWhiteInk)], bins=bins)[0]
hist2dDist = np.histogram2d(values[keypointsInsideMask], diffValMax[keypointsInsideMask], [bins, bins])[0]
hist2dDistScore = np.histogram2d(keypointsInside[:, 3].astype(np.uint8), diffValMax[keypointsInsideMask], [bins, bins])[0]
else:
hist = np.add(hist, np.histogram(values[keypointsInsideMask], bins)[0])
histDist = np.add(histDist, np.histogram(keypointsInside[:, 3], bins=bins)[0])
histDistMax = np.add(histDistMax, np.histogram(diffValMax[keypointsInsideMask], bins=bins)[0])
histDistMaxWhite = np.add(histDistMaxWhite, np.histogram(diffValMax[np.bitwise_and(keypointsInsideMask, maskWhiteInk)], bins=bins)[0])
hist2dDist = np.add(hist2dDist, np.histogram2d(values[keypointsInsideMask], diffValMax[keypointsInsideMask], [bins, bins])[0])
hist2dDistScore = np.add(hist2dDistScore, np.histogram2d(keypointsInside[:, 3].astype(np.uint8), diffValMax[keypointsInsideMask], [bins, bins])[0])
outsideMask = np.invert(keypointsInsideMask)
keypointsOutside = keypoints[outsideMask, :]
valuesFp = img[keypointsOutside[:, 1].astype(int), keypointsOutside[:, 0].astype(int)]
if histFp is None:
histFp = np.histogram(valuesFp, bins=bins)[0]
histDistFp = np.histogram(keypointsOutside[:, 3], bins=bins)[0]
histDistMaxFp = np.histogram(diffValMax[outsideMask], bins=bins)[0]
histDistMaxWhiteFp = np.histogram(diffValMax[np.bitwise_and(outsideMask, maskWhiteInk)], bins=bins)[0]
hist2dDistFp = np.histogram2d(values[outsideMask], diffValMax[outsideMask], [bins, bins])[0]
hist2dDistScoreFp = np.histogram2d(keypointsOutside[:, 3], diffValMax[outsideMask], [bins, bins])[0]
else:
histFp = np.add(histFp, np.histogram(valuesFp, bins)[0])
histDistFp = np.add(histDistFp, np.histogram(keypointsOutside[:, 3], bins=bins)[0])
histDistMaxFp = np.add(histDistMaxFp, np.histogram(diffValMax[outsideMask], bins=bins)[0])
histDistMaxWhiteFp = np.add(histDistMaxWhiteFp, np.histogram(diffValMax[np.bitwise_and(outsideMask, maskWhiteInk)], bins=bins)[0])
hist2dDistFp = np.add(hist2dDistFp, np.histogram2d(values[outsideMask], diffValMax[outsideMask], [bins, bins])[0])
hist2dDistScoreFp = np.add(hist2dDistScoreFp, np.histogram2d(keypointsOutside[:, 3], diffValMax[outsideMask], [bins, bins])[0])
return (hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp, keypointsInside)
def draw_missed_letters_figure(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 13, inter = True, scalingFactor=0.5, segmList=[]):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
missLetter = []
imagesMiss = {}
for letter in np.asarray(keys)[np.asarray(indices)]:
if not missing_letters.has_key(letter):
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
if len(segmList) > 0:
base = os.path.basename(miss[0])
if not base in segmList:
continue
missLetter.append(miss)
if imagesMiss.has_key(miss[0]):
imagesMiss[miss[0]].append( miss[1] )
else:
imagesMiss[miss[0]] = []
imagesMiss[miss[0]].append( miss[1] )
for image in imagesMiss.keys():
f = plt.figure(num = 250)
ax = f.add_subplot(111)
imgc2 = cv2.imread(image)
img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
imgc2 = cv2.cvtColor(imgc2, cv2.COLOR_BGR2RGB)
ax.imshow(imgc2)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
octaves = np.unique( keypoints[:, 2])
maxOctave = np.max(octaves)
scales = ft.getImageScales()
for i in range(int(maxOctave) + 1):
octavePoints = keypoints[keypoints[:, 2] == i, :]
c = 'red'
if len(octavePoints) > 0 and octavePoints.shape[1] > 9:
for k in range(6):
maski = octavePoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
c = 'blue'
if k == 5:
style = "yo"
continue
s = 10 / scales[i]
ax.scatter(octavePoints[maski, 0], octavePoints[maski, 1],c=c, s=s )
for i in range(len(imagesMiss[image])):
gt0 = imagesMiss[image][i]
line = mlines.Line2D(np.array([gt0[0], gt0[2], gt0[2], gt0[0], gt0[0]]), np.array([gt0[1], gt0[1], gt0[3], gt0[3], gt0[1]]), lw=5., alpha=0.6, color='r')
ax.add_line(line)
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
ax.set_xlim([0, imgc2.shape[1]])
ax.set_ylim([imgc2.shape[0], 0])
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
plt.show()
def draw_missed_letters_tile(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 13, inter = True, scalingFactor=1.6, segmList=[]):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
segmDir = '/datagrid/personal/TextSpotter/evaluation-sets/icdar2013-Test/segmentations'
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
border = 15
missLetter = []
imagesMiss = {}
for letter in np.asarray(keys)[np.asarray(indices)]:
if not missing_letters.has_key(letter):
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
if len(segmList) > 0:
base = os.path.basename(miss[0])
if not base in segmList:
continue
missLetter.append(miss)
if imagesMiss.has_key(miss[0]):
imagesMiss[miss[0]].append( miss[1] )
else:
imagesMiss[miss[0]] = []
imagesMiss[miss[0]].append( miss[1] )
rowSize = len(imagesMiss.keys())
f, axes = plt.subplots(2, len(imagesMiss.keys()))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
figNo = 0
for image in imagesMiss.keys():
if len(imagesMiss.keys()) > 1:
ax0 = axes[0][figNo]
ax = axes[1][figNo]
else:
ax0 = axes[figNo]
ax = axes[figNo]
figNo += 1
if color == 1:
img = cv2.imread(image)
else:
img = cv2.imread(image, 0)
baseName = os.path.basename(image)
baseName = baseName[:-4]
segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
if color == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(len(imagesMiss[image])):
if i == 0:
orBox = imagesMiss[image][0]
else:
orBox = utils.union(orBox, imagesMiss[image][i])
gt0 = orBox
gt = [gt0[0] - border, gt0[1] - border, gt0[2] + border, gt0[3] + border ]
gt[0] = max(0, gt[0])
gt[1] = max(0, gt[1])
gt[2] = min(img.shape[1], gt[2])
gt[3] = min(img.shape[0], gt[3])
zoom = img[gt[1]:gt[3], gt[0]:gt[2]]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
ax0.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
mask = (keypoints[:, 0] > gt[0]) * (keypoints[:, 0] < gt[2]) * (keypoints[:, 1] > gt[1]) * (keypoints[:, 1] < gt[3])
kpMask = keypoints[mask]
kpMask[:, 0] = kpMask[:, 0] - gt[0]
kpMask[:, 1] = kpMask[:, 1] - gt[1]
kpMask[:, 7] = kpMask[:, 7] - gt[0]
kpMask[:, 8] = kpMask[:, 8] - gt[1]
ax.plot(kpMask[:, 0], kpMask[:, 1], 'ro')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax0.xaxis.set_ticklabels([])
ax0.yaxis.set_ticklabels([])
for k in range(kpMask.shape[0]):
ax.plot([kpMask[k,0], kpMask[k,7]], [kpMask[k,1], kpMask[k,8]], 'r-')
style = 'rx'
if kpMask.shape[1] > 9:
for k in range(3):
maski = kpMask[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "rs"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot([kpMask[maski,7]], [kpMask[maski,8]], style)
for i in range(len(imagesMiss[image])):
gt0 = imagesMiss[image][i]
mask = (keypointsInside[:, 0] > gt[0]) * (keypointsInside[:, 0] < gt[2]) * (keypointsInside[:, 1] > gt[1]) * (keypointsInside[:, 1] < gt[3])
kpMask = keypointsInside[mask]
keypointsInside[:, 0] = keypointsInside[:, 0] - gt[0]
keypointsInside[:, 1] = keypointsInside[:, 1] - gt[1]
keypointsInside[:, 7] = keypointsInside[:, 7] - gt[0]
keypointsInside[:, 8] = keypointsInside[:, 8] - gt[1]
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
for k in range(keypointsInside.shape[0]):
ax.plot([keypointsInside[k,0], keypointsInside[k,7]], [keypointsInside[k,1], keypointsInside[k,8]], 'g-')
ax.set_xlim(0, gt[2] - max(0, gt[0]))
ax.set_ylim((gt[3] - max(0, gt[1]), 0))
line = mlines.Line2D(np.array([gt0[0] - gt[0], gt0[2] - gt[0], gt0[2] - gt[0], gt0[0] - gt[0], gt0[0] - gt[0]]), np.array([gt0[1] - gt[1], gt0[1] - gt[1], gt0[3] - gt[1], gt0[3] - gt[1], gt0[1] - gt[1]]), lw=5., alpha=0.6, color='r')
ax0.add_line(line)
plt.show()
def computeWordOvelap(imgc, word_gt, words, wordsOk, wordsFp):
best_match = 0
best_match2 = 0
for det_word in words:
try:
cv2.rectangle(imgc, (det_word[0], det_word[1]), (det_word[2], det_word[3]), (0, 0, 255))
for gt_box in word_gt:
rect_int = utils.intersect( det_word, gt_box )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(det_word, gt_box))
ratio = int_area / float(union_area)
ratio2 = int_area / utils.area(gt_box)
if ratio > best_match:
best_match = ratio
w = det_word
best_match2 = ratio2
if best_match2 > 0.3:
wordsOk.append(det_word)
elif best_match == 0:
wordsFp.append(det_word)
except:
pass
return (best_match, best_match2)
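# Worked example of the two overlap ratios computed above, assuming the utils
# rectangles are (x1, y1, x2, y2) boxes:
#   det_word = (0, 0, 10, 10), gt_box = (5, 0, 15, 10)
#   intersection area = 5 * 10 = 50, union area = 150
#   ratio  = 50 / 150 ~ 0.33   (intersection over union)
#   ratio2 = 50 / 100 = 0.50   (intersection over ground-truth area)
# so this detection would be appended to wordsOk, since ratio2 > 0.3.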
evalPunctuation = False
def computeSegmOverlap(gt_rects, segmentations, MIN_SEGM_OVRLAP = 0.6):
segm2chars = 0
for k in range(len(gt_rects)):
gt_rect = gt_rects[k]
best_match = 0
best_match_line = 0
if (gt_rect[4] == ',' or gt_rect[4] == '.' or gt_rect[4] == '\'' or gt_rect[4] == ':' or gt_rect[4] == '-') and not evalPunctuation:
continue
best_match2 = 0
for detId in range(segmentations.shape[0]):
rectn = segmentations[detId, :]
rect_int = utils.intersect( rectn, gt_rect )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(rectn, gt_rect))
ratio = int_area / float(union_area)
if ratio > best_match:
best_match = ratio
            if ratio > best_match_line and rectn[7] == 1.0:
best_match_line = ratio
gt_rect[5] = best_match
if best_match < MIN_SEGM_OVRLAP:
if k < len(gt_rects) - 1:
gt_rect2 = gt_rects[k + 1]
chars2Rect = utils.union(gt_rect2, gt_rect)
rect_int = utils.intersect( rectn, chars2Rect )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(rectn, chars2Rect))
ratio = int_area / float(union_area)
if ratio > best_match2:
if ratio > MIN_SEGM_OVRLAP:
segm2chars += 1
best_match2 = ratio
gt_rect[5] = ratio
gt_rect2[5] = ratio
def read_segm_data(input_dir, prefix = ""):
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
ms = []
dirs = []
for dir_name in subdirs:
inputFile = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(inputFile):
continue
vars_dict = np.load(inputFile)
missing_segm = vars_dict['missing_segm']
missing_segm = dict(missing_segm.tolist())
ms.append(missing_segm)
dirs.append(prefix + os.path.basename(dir_name))
return (ms, dirs)
def compare_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation', input_dir2='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentationg', showPictures = False):
ft = FASTex()
(ms, dirs) = read_segm_data(input_dir)
(ms2, dirs2) = read_segm_data(input_dir2, 'g')
ms.extend(ms2)
dirs.extend(dirs2)
sumHash = {}
for j in np.arange(0, len(ms)):
missing_segm = ms[j]
for image in missing_segm.keys():
arr = missing_segm[image]
if not sumHash.has_key(image):
sumHash[image] = arr
continue
for i in range(len(arr)):
miss_gt = arr[i]
check = sumHash[image]
hasGt = False
for k in range(len(check)):
miss_gt2 = check[k]
if miss_gt == miss_gt2:
hasGt = True
if not hasGt:
sumHash[image].append(miss_gt)
missing_segm = ms[0]
data = []
dataf = []
gt_id = 0
columns = ['Img', 'GT Id']
for image in sumHash.keys():
arr = sumHash[image]
f = None
for i in range(len(arr)):
orValue = False
miss_gt = arr[i]
row = []
row.append(os.path.basename(image))
row.append(gt_id)
gt_id += 1
rowf = []
for j in np.arange(0, len(ms)):
if gt_id == 1:
columns.append(dirs[j])
msj = ms[j]
hasSegmj = True
val = 1
if msj.has_key(image):
arrj = msj[image]
for k in range(len(arrj)):
miss_gtj = arrj[k]
if miss_gtj == miss_gt:
hasSegmj = False
val = 0
break
row.append(hasSegmj)
rowf.append(val)
orValue = orValue or hasSegmj
if orValue:
rowf.append(1)
else:
rowf.append(0)
if showPictures:
img = cv2.imread(image)
imgg = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
if f == None:
f, axes = plt.subplots(1, 2, figsize=(16, 3))
f.suptitle('Missing segmentation: {0}'.format(image))
ax = axes[0]
ax.imshow(img, cmap=pylab.gray(), interpolation='nearest')
ax = axes[1]
ax.imshow(imgg, cmap=pylab.gray(), interpolation='nearest')
orBox = miss_gt
segmentations = ft.getCharSegmentations(imgg)
keypoints = ft.getLastDetectionKeypoints()
style = 'rx'
for k in range(5):
maski = keypoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
ax.plot(keypoints[maski, 0], keypoints[maski, 1], style)
for k in range(keypoints.shape[0]):
ax.plot([keypoints[k,0], keypoints[k,7]], [keypoints[k,1], keypoints[k,8]], 'r-')
ax = axes[0]
else:
orBox = utils.union(orBox, miss_gt)
line = mlines.Line2D(np.array([miss_gt[0], miss_gt[2], miss_gt[2], miss_gt[0], miss_gt[0]]), np.array([miss_gt[1], miss_gt[1], miss_gt[3], miss_gt[3], miss_gt[1]]), lw=5., alpha=0.6, color='r')
ax.add_line(line)
row.append(orValue)
data.append(row)
dataf.append(rowf)
if f != None:
ax = axes[0]
ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
ax = axes[1]
ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
plt.show()
columns.append("OR")
data = np.array(data)
dataf = np.array(dataf)
df = pandas.DataFrame(data = data, columns=columns)
#print(df)
sumCols = dataf.sum(0)
sumCols = dataf.shape[0] - sumCols
print("Missing Segmentations:")
print(sumCols)
indices = np.argsort(sumCols)
bestFactor = indices[1]
missing_segm = ms[bestFactor]
print( "Best factor: {0}".format(dirs[bestFactor]) )
maskBest = dataf[:, bestFactor] == 0
datafSec = dataf[maskBest, :]
sumCols = datafSec.sum(0)
sumCols = datafSec.shape[0] - sumCols
print("Missing Segmentations 2 best:")
print(sumCols)
indices = np.argsort(sumCols)
bestFactor2 = indices[1]
print( "Best factor 2: {0}, missing segmentations: {1} -> {2}".format(dirs[bestFactor2], datafSec.shape[0], sumCols[indices[1]]) )
maskBest = datafSec[:, bestFactor2] == 0
dataf3 = datafSec[maskBest, :]
sumCols = dataf3.sum(0)
sumCols = dataf3.shape[0] - sumCols
indices = np.argsort(sumCols)
bestFactor2 = indices[1]
print( "Best factor 3: {0}, missing segmentations: {1} -> {2}".format(dirs[bestFactor2], dataf3.shape[0], sumCols[indices[1]]) )
if __name__ == '__main__':
draw_missed_letters('/tmp/evalTest')
segmList = []
segmList.append( 'img_49.jpg' )
segmList.append( 'img_168.jpg' )
segmList.append( 'img_206.jpg' )
segmList.append( 'img_86.jpg' )
segmList.append( 'img_205.jpg' )
segmList.append( 'img_232.jpg' )
segmList.append( 'img_34.jpg' )
segmList.append( 'img_230.jpg' )
draw_missed_letters_figure(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Test', color = 0, edgeThreshold = 13, inter = True, segmList=segmList)
'''
compare_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation', input_dir2='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentationg', showPictures = True)
plotSegmRoc('/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation')
'''
|
gpl-2.0
| 3,569,044,324,166,943,000
| 38.57388
| 294
| 0.526004
| false
| 3.405217
| false
| false
| false
|
elzaggo/pydoop
|
examples/pseudo_terasort/pteragen.py
|
1
|
3734
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Generate a GraySort input data set.
The user specifies the number of rows and the output directory and this
class runs a map/reduce program to generate the data.
The format of the data is:
* (10 bytes key) (constant 2 bytes) (32 bytes rowid)
(constant 4 bytes) (48 bytes filler) (constant 4 bytes)
* The rowid is the right justified row id as a hex number.
"""
import struct
import random
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
from ioformats import Writer
TERAGEN = "TERAGEN"
CHECKSUM = "CHECKSUM"
SEED = 423849
CACHE_SIZE = 16 * 1024
getrandbits = random.getrandbits
class GenSort(object):
"""\
Some sort of gensort look-alike. No idea on its statistical properties
"""
BREAK_BYTES = struct.pack("2B", 0x00, 0x11)
DATA_HEAD = struct.pack("4B", 0x88, 0x99, 0xAA, 0xBB)
DATA_TAIL = struct.pack("4B", 0xCC, 0xDD, 0xEE, 0xFF)
def __init__(self, seed, row, cache_size):
self.cache_size = cache_size
self.fmt = '0%dx' % (2 * self.cache_size)
self.row = row
self.cache = None
self.index = 0
# we use 10 (keys) + 6 (filler) random bytes per record
self.skip_ahead(16 * row)
random.seed(seed)
def update_cache(self):
r = getrandbits(8 * self.cache_size)
self.cache = format(r, self.fmt).encode('ascii')
def skip_ahead(self, skip):
"""\
        Skip ahead by `skip` random bytes.
"""
chunks = skip // self.cache_size
cache_size_bits = 8 * self.cache_size
for _ in range(chunks):
getrandbits(cache_size_bits)
self.update_cache()
self.index = 2 * (skip - chunks * self.cache_size)
def next_random_block(self):
if self.index == 2 * self.cache_size:
self.update_cache()
self.index = 0
s, self.index = self.index, self.index + 32
return self.cache[s:self.index]
def generate_record(self):
# 10 bytes of random
# 2 constant bytes
# 32 bytes record number as an ASCII-encoded 32-digit hexadecimal
# 4 bytes of break data
# 48 bytes of filler based on low 48 bits of random
# 4 bytes of break data
rnd = self.next_random_block()
key = rnd[:10]
low = rnd[-12:]
row_id = format(self.row, '032x').encode('ascii')
filler = bytes(sum(map(list, zip(low, low, low, low)), []))
value = (self.BREAK_BYTES + row_id +
self.DATA_HEAD + filler + self.DATA_TAIL)
self.row = self.row + 1
return key, value
class Mapper(api.Mapper):
def __init__(self, context):
super(Mapper, self).__init__(context)
self.gensort = None
def map(self, context):
if self.gensort is None:
row = struct.unpack('>q', context.key)[0]
self.gensort = GenSort(SEED, row, CACHE_SIZE)
key, value = self.gensort.generate_record()
context.emit(key, value)
factory = pp.Factory(mapper_class=Mapper, record_writer_class=Writer)
def __main__():
pp.run_task(factory, auto_serialize=False)
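# Quick sanity sketch of the record layout described in the module docstring.
# It only uses names defined above and is guarded so it does not run under the
# normal pydoop submission path.
if __name__ == "__main__":
    gensort = GenSort(SEED, 0, CACHE_SIZE)
    key, value = gensort.generate_record()
    # 10-byte key + (2 + 32 + 4 + 48 + 4)-byte value = 100 bytes per record
    assert len(key) == 10 and len(value) == 90
    print(key, value[:34])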
|
apache-2.0
| -846,706,939,941,173,600
| 29.606557
| 77
| 0.631494
| false
| 3.506103
| false
| false
| false
|
timothycrosley/hug
|
hug/interface.py
|
1
|
40948
|
"""hug/interface.py
Defines the various interface hug provides to expose routes to functions
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import argparse
import asyncio
import os
import sys
from collections import OrderedDict
from functools import lru_cache, partial, wraps
import falcon
from falcon import HTTP_BAD_REQUEST
import hug._empty as empty
import hug.api
import hug.output_format
import hug.types as types
from hug import introspect
from hug.exceptions import InvalidTypeData
from hug.format import parse_content_type
from hug.types import (
MarshmallowInputSchema,
MarshmallowReturnSchema,
Multiple,
OneOf,
SmartBoolean,
Text,
text,
)
DOC_TYPE_MAP = {str: "String", bool: "Boolean", list: "Multiple", int: "Integer", float: "Float"}
def _doc(kind):
return DOC_TYPE_MAP.get(kind, kind.__doc__)
def asyncio_call(function, *args, **kwargs):
loop = asyncio.get_event_loop()
if loop.is_running():
return function(*args, **kwargs)
function = asyncio.ensure_future(function(*args, **kwargs), loop=loop)
loop.run_until_complete(function)
return function.result()
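# Rough illustration of asyncio_call with a stand-in coroutine (defined here
# only for the example):
#
#     async def get_value():
#         return 42
#
#     asyncio_call(get_value)  # -> 42 when no event loop is already running;
#                              # inside a running loop the coroutine object is
#                              # returned for the caller to await.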
class Interfaces(object):
"""Defines the per-function singleton applied to hugged functions defining common data needed by all interfaces"""
def __init__(self, function, args=None):
self.api = hug.api.from_object(function)
self.spec = getattr(function, "original", function)
self.arguments = introspect.arguments(function)
self.name = introspect.name(function)
self._function = function
self.is_coroutine = introspect.is_coroutine(self.spec)
if self.is_coroutine:
self.spec = getattr(self.spec, "__wrapped__", self.spec)
self.takes_args = introspect.takes_args(self.spec)
self.takes_kwargs = introspect.takes_kwargs(self.spec)
self.parameters = list(introspect.arguments(self.spec, self.takes_kwargs + self.takes_args))
if self.takes_kwargs:
self.kwarg = self.parameters.pop(-1)
if self.takes_args:
self.arg = self.parameters.pop(-1)
self.parameters = tuple(self.parameters)
self.defaults = dict(zip(reversed(self.parameters), reversed(self.spec.__defaults__ or ())))
self.required = self.parameters[: -(len(self.spec.__defaults__ or ())) or None]
self.is_method = introspect.is_method(self.spec) or introspect.is_method(function)
if self.is_method:
self.required = self.required[1:]
self.parameters = self.parameters[1:]
self.all_parameters = set(self.parameters)
if self.spec is not function:
self.all_parameters.update(self.arguments)
if args is not None:
transformers = args
else:
transformers = self.spec.__annotations__
self.transform = transformers.get("return", None)
self.directives = {}
self.input_transformations = {}
for name, transformer in transformers.items():
if isinstance(transformer, str):
continue
elif hasattr(transformer, "directive"):
self.directives[name] = transformer
continue
if hasattr(transformer, "from_string"):
transformer = transformer.from_string
elif hasattr(transformer, "load"):
transformer = MarshmallowInputSchema(transformer)
elif hasattr(transformer, "deserialize"):
transformer = transformer.deserialize
self.input_transformations[name] = transformer
    def __call__(__hug_internal_self, *args, **kwargs):  # noqa: N805
        """Calls the wrapped function, uses __hug_internal_self in case self is passed in as a kwarg from the wrapper"""
if not __hug_internal_self.is_coroutine:
return __hug_internal_self._function(*args, **kwargs)
return asyncio_call(__hug_internal_self._function, *args, **kwargs)
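# Rough sketch of the data Interfaces gathers for a hugged function; the
# example function and its annotations are hypothetical:
#
#     def multiply(number: types.number, factor: types.number = 2):
#         return number * factor
#
# Once routed, multiply.interface would expose roughly:
#     parameters -> ('number', 'factor'), required -> ('number',)
#     defaults   -> {'factor': 2}
#     input_transformations -> one entry per annotated argument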
class Interface(object):
"""Defines the basic hug interface object, which is responsible for wrapping a user defined function and providing
all the info requested in the function as well as the route
    An Interface object should be created for every kind of protocol hug supports
"""
__slots__ = (
"interface",
"_api",
"defaults",
"parameters",
"required",
"_outputs",
"on_invalid",
"requires",
"validate_function",
"transform",
"examples",
"output_doc",
"wrapped",
"directives",
"all_parameters",
"raise_on_invalid",
"invalid_outputs",
"map_params",
"input_transformations",
)
def __init__(self, route, function):
if route.get("api", None):
self._api = route["api"]
if "examples" in route:
self.examples = route["examples"]
function_args = route.get("args")
if not hasattr(function, "interface"):
function.__dict__["interface"] = Interfaces(function, function_args)
self.interface = function.interface
self.requires = route.get("requires", ())
if "validate" in route:
self.validate_function = route["validate"]
if "output_invalid" in route:
self.invalid_outputs = route["output_invalid"]
        if "parameters" not in route:
self.defaults = self.interface.defaults
self.parameters = self.interface.parameters
self.all_parameters = self.interface.all_parameters
self.required = self.interface.required
else:
self.defaults = route.get("defaults", {})
self.parameters = tuple(route["parameters"])
self.all_parameters = set(route["parameters"])
self.required = tuple(
[parameter for parameter in self.parameters if parameter not in self.defaults]
)
if "map_params" in route:
self.map_params = route["map_params"]
for interface_name, internal_name in self.map_params.items():
if internal_name in self.defaults:
self.defaults[interface_name] = self.defaults.pop(internal_name)
if internal_name in self.parameters:
self.parameters = [
interface_name if param == internal_name else param
for param in self.parameters
]
if internal_name in self.all_parameters:
self.all_parameters.remove(internal_name)
self.all_parameters.add(interface_name)
if internal_name in self.required:
self.required = tuple(
[
interface_name if param == internal_name else param
for param in self.required
]
)
reverse_mapping = {
internal: interface for interface, internal in self.map_params.items()
}
self.input_transformations = {
reverse_mapping.get(name, name): transform
for name, transform in self.interface.input_transformations.items()
}
else:
self.map_params = {}
self.input_transformations = self.interface.input_transformations
if "output" in route:
self.outputs = route["output"]
self.transform = route.get("transform", None)
if self.transform is None and not isinstance(self.interface.transform, (str, type(None))):
self.transform = self.interface.transform
if hasattr(self.transform, "dump"):
self.transform = MarshmallowReturnSchema(self.transform)
self.output_doc = self.transform.__doc__
elif self.transform or self.interface.transform:
output_doc = self.transform or self.interface.transform
self.output_doc = output_doc if type(output_doc) is str else _doc(output_doc)
self.raise_on_invalid = route.get("raise_on_invalid", False)
if "on_invalid" in route:
self.on_invalid = route["on_invalid"]
elif self.transform:
self.on_invalid = self.transform
defined_directives = self.api.directives()
used_directives = set(self.parameters).intersection(defined_directives)
self.directives = {
directive_name: defined_directives[directive_name] for directive_name in used_directives
}
self.directives.update(self.interface.directives)
@property
def api(self):
return getattr(self, "_api", self.interface.api)
@property
def outputs(self):
return getattr(self, "_outputs", None)
@outputs.setter
def outputs(self, outputs):
self._outputs = outputs # pragma: no cover - generally re-implemented by sub classes
def validate(self, input_parameters, context):
"""Runs all set type transformers / validators against the provided input parameters and returns any errors"""
errors = {}
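        # When raise_on_invalid is set, transformation errors propagate to the
        # caller; otherwise they are collected per parameter and returned below.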
for key, type_handler in self.input_transformations.items():
if self.raise_on_invalid:
if key in input_parameters:
input_parameters[key] = self.initialize_handler(
type_handler, input_parameters[key], context=context
)
else:
try:
if key in input_parameters:
input_parameters[key] = self.initialize_handler(
type_handler, input_parameters[key], context=context
)
except InvalidTypeData as error:
errors[key] = error.reasons or str(error)
except Exception as error:
if hasattr(error, "args") and error.args:
errors[key] = error.args[0]
else:
errors[key] = str(error)
for require in self.required:
            if require not in input_parameters:
errors[require] = "Required parameter '{}' not supplied".format(require)
if not errors and getattr(self, "validate_function", False):
errors = self.validate_function(input_parameters)
return errors
    def check_requirements(self, request=None, response=None, context=None):
        """Checks to see if all set requirements pass.
        If all requirements pass, nothing will be returned;
        otherwise, the error reported will be returned.
"""
for requirement in self.requires:
conclusion = requirement(
response=response, request=request, context=context, module=self.api.module
)
if conclusion and conclusion is not True:
return conclusion
def documentation(self, add_to=None):
"""Produces general documentation for the interface"""
doc = OrderedDict() if add_to is None else add_to
usage = self.interface.spec.__doc__
if usage:
doc["usage"] = usage
if getattr(self, "requires", None):
doc["requires"] = [
getattr(requirement, "__doc__", requirement.__name__)
for requirement in self.requires
]
doc["outputs"] = OrderedDict()
doc["outputs"]["format"] = _doc(self.outputs)
doc["outputs"]["content_type"] = self.outputs.content_type
parameters = [
param
for param in self.parameters
            if param not in ("request", "response", "self")
            and param not in ("api_version", "body")
and not param.startswith("hug_")
and not hasattr(param, "directive")
]
if parameters:
inputs = doc.setdefault("inputs", OrderedDict())
types = self.interface.spec.__annotations__
for argument in parameters:
kind = types.get(self._remap_entry(argument), text)
if getattr(kind, "directive", None) is True:
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition["type"] = kind if isinstance(kind, str) else _doc(kind)
default = self.defaults.get(argument, None)
if default is not None:
input_definition["default"] = default
return doc
def _rewrite_params(self, params):
for interface_name, internal_name in self.map_params.items():
if interface_name in params:
params[internal_name] = params.pop(interface_name)
def _remap_entry(self, interface_name):
return self.map_params.get(interface_name, interface_name)
@staticmethod
def cleanup_parameters(parameters, exception=None):
for _parameter, directive in parameters.items():
if hasattr(directive, "cleanup"):
directive.cleanup(exception=exception)
@staticmethod
def initialize_handler(handler, value, context):
try: # It's easier to ask for forgiveness than for permission
return handler(value, context=context)
except TypeError:
return handler(value)
class Local(Interface):
"""Defines the Interface responsible for exposing functions locally"""
__slots__ = ("skip_directives", "skip_validation", "version")
def __init__(self, route, function):
super().__init__(route, function)
self.version = route.get("version", None)
if "skip_directives" in route:
self.skip_directives = True
if "skip_validation" in route:
self.skip_validation = True
self.interface.local = self
def __get__(self, instance, kind):
"""Support instance methods"""
return partial(self.__call__, instance) if instance else self.__call__
@property
def __name__(self):
return self.interface.spec.__name__
@property
def __module__(self):
return self.interface.spec.__module__
    def __call__(self, *args, **kwargs):
        """Defines how calling the function locally should be handled"""
        context = self.api.context_factory(api=self.api, api_version=self.version, interface=self)
for _requirement in self.requires:
lacks_requirement = self.check_requirements(context=context)
if lacks_requirement:
self.api.delete_context(context, lacks_requirement=lacks_requirement)
return self.outputs(lacks_requirement) if self.outputs else lacks_requirement
for index, argument in enumerate(args):
kwargs[self.parameters[index]] = argument
if not getattr(self, "skip_directives", False):
for parameter, directive in self.directives.items():
if parameter in kwargs:
continue
arguments = (self.defaults[parameter],) if parameter in self.defaults else ()
kwargs[parameter] = directive(
*arguments,
api=self.api,
api_version=self.version,
interface=self,
context=context
)
if not getattr(self, "skip_validation", False):
errors = self.validate(kwargs, context)
if errors:
errors = {"errors": errors}
if getattr(self, "on_invalid", False):
errors = self.on_invalid(errors)
outputs = getattr(self, "invalid_outputs", self.outputs)
self.api.delete_context(context, errors=errors)
return outputs(errors) if outputs else errors
self._rewrite_params(kwargs)
try:
result = self.interface(**kwargs)
if self.transform:
if hasattr(self.transform, "context"):
self.transform.context = context
result = self.transform(result)
except Exception as exception:
self.cleanup_parameters(kwargs, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(kwargs)
self.api.delete_context(context)
return self.outputs(result) if self.outputs else result
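# A minimal usage sketch (not part of this module), assuming the public hug API
# (hug.local, hug.types): functions exposed through hug's local router are
# replaced by a Local interface, so calling them still runs directives,
# validation and transforms:
#
#   @hug.local()
#   def multiply(number: hug.types.number, by: hug.types.number = 2):
#       return number * by
#
#   multiply(3)        # -> 6
#   multiply("three")  # -> {'errors': {...}} instead of raising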
class CLI(Interface):
"""Defines the Interface responsible for exposing functions to the CLI"""
def __init__(self, route, function):
super().__init__(route, function)
if not self.outputs:
self.outputs = self.api.cli.output_format
self.interface.cli = self
self.reaffirm_types = {}
use_parameters = list(self.interface.parameters)
self.additional_options = getattr(
self.interface, "arg", getattr(self.interface, "kwarg", False)
)
if self.additional_options:
use_parameters.append(self.additional_options)
used_options = {"h", "help"}
nargs_set = self.interface.takes_args or self.interface.takes_kwargs
class CustomArgumentParser(argparse.ArgumentParser):
exit_callback = None
def exit(self, status=0, message=None):
if self.exit_callback:
self.exit_callback(message)
super().exit(status, message)
self.parser = CustomArgumentParser(
description=route.get("doc", self.interface.spec.__doc__)
)
if "version" in route:
self.parser.add_argument(
"-v",
"--version",
action="version",
version="{0} {1}".format(
route.get("name", self.interface.spec.__name__), route["version"]
),
)
used_options.update(("v", "version"))
self.context_tranforms = []
for option in use_parameters:
if option in self.directives:
continue
if option in self.interface.required or option == self.additional_options:
args = (option,)
else:
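                # Derive the shortest unique short flag for this option,
                # lengthening the prefix until it no longer collides with a
                # flag that is already in use.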
short_option = option[0]
while short_option in used_options and len(short_option) < len(option):
short_option = option[: len(short_option) + 1]
used_options.add(short_option)
used_options.add(option)
if short_option != option:
args = ("-{0}".format(short_option), "--{0}".format(option))
else:
args = ("--{0}".format(option),)
kwargs = {}
if option in self.defaults:
kwargs["default"] = self.defaults[option]
if option in self.interface.input_transformations:
transform = self.interface.input_transformations[option]
kwargs["type"] = transform
kwargs["help"] = _doc(transform)
if transform in (list, tuple) or isinstance(transform, types.Multiple):
kwargs["action"] = "append"
kwargs["type"] = Text()
self.reaffirm_types[option] = transform
elif transform == bool or isinstance(transform, type(types.boolean)):
kwargs["action"] = "store_true"
self.reaffirm_types[option] = transform
elif isinstance(transform, types.OneOf):
kwargs["choices"] = transform.values
elif (
option in self.interface.spec.__annotations__
and type(self.interface.spec.__annotations__[option]) == str
):
kwargs["help"] = option
if (
kwargs.get("type", None) == bool or kwargs.get("action", None) == "store_true"
            ) and not kwargs.get("default", None):
kwargs["action"] = "store_true"
kwargs.pop("type", None)
elif kwargs.get("action", None) == "store_true":
kwargs.pop("action", None)
if option == self.additional_options:
kwargs["nargs"] = "*"
elif (
not nargs_set
and kwargs.get("action", None) == "append"
                and option not in self.interface.defaults
):
kwargs["nargs"] = "*"
kwargs.pop("action", "")
nargs_set = True
self.parser.add_argument(*args, **kwargs)
self.api.cli.commands[route.get("name", self.interface.spec.__name__)] = self
def output(self, data, context):
"""Outputs the provided data using the transformations and output format specified for this CLI endpoint"""
if self.transform:
if hasattr(self.transform, "context"):
self.transform.context = context
data = self.transform(data)
if hasattr(data, "read"):
data = data.read().decode("utf8")
if data is not None:
data = self.outputs(data)
if data:
sys.stdout.buffer.write(data)
if not data.endswith(b"\n"):
sys.stdout.buffer.write(b"\n")
return data
def __str__(self):
return self.parser.description or ""
    def __call__(self):
        """Calls the wrapped function through the lens of a CLI-run command"""
context = self.api.context_factory(api=self.api, argparse=self.parser, interface=self)
def exit_callback(message):
self.api.delete_context(context, errors=message)
self.parser.exit_callback = exit_callback
self.api._ensure_started()
for requirement in self.requires:
conclusion = requirement(request=sys.argv, module=self.api.module, context=context)
if conclusion and conclusion is not True:
self.api.delete_context(context, lacks_requirement=conclusion)
return self.output(conclusion, context)
if self.interface.is_method:
self.parser.prog = "%s %s" % (self.api.module.__name__, self.interface.name)
known, unknown = self.parser.parse_known_args()
pass_to_function = vars(known)
for option, directive in self.directives.items():
arguments = (self.defaults[option],) if option in self.defaults else ()
pass_to_function[option] = directive(
*arguments, api=self.api, argparse=self.parser, context=context, interface=self
)
for field, type_handler in self.reaffirm_types.items():
if field in pass_to_function:
if not pass_to_function[field] and type_handler in (
list,
tuple,
hug.types.Multiple,
):
pass_to_function[field] = type_handler(())
else:
pass_to_function[field] = self.initialize_handler(
type_handler, pass_to_function[field], context=context
)
if getattr(self, "validate_function", False):
errors = self.validate_function(pass_to_function)
if errors:
self.api.delete_context(context, errors=errors)
return self.output(errors, context)
args = None
if self.additional_options:
args = []
for parameter in self.interface.parameters:
if parameter in pass_to_function:
args.append(pass_to_function.pop(parameter))
args.extend(pass_to_function.pop(self.additional_options, ()))
if self.interface.takes_kwargs:
add_options_to = None
for option in unknown:
if option.startswith("--"):
if add_options_to:
value = pass_to_function[add_options_to]
if len(value) == 1:
pass_to_function[add_options_to] = value[0]
elif value == []:
pass_to_function[add_options_to] = True
add_options_to = option[2:]
pass_to_function.setdefault(add_options_to, [])
elif add_options_to:
pass_to_function[add_options_to].append(option)
self._rewrite_params(pass_to_function)
try:
if args:
result = self.output(self.interface(*args, **pass_to_function), context)
else:
result = self.output(self.interface(**pass_to_function), context)
except Exception as exception:
self.cleanup_parameters(pass_to_function, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(pass_to_function)
self.api.delete_context(context)
return result
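# A minimal usage sketch (not part of this module), assuming the public hug API
# (hug.cli, hug.types): the generated argparse interface is reachable through
# function.interface.cli:
#
#   @hug.cli()
#   def greet(name: hug.types.text, punctuation: hug.types.text = "!"):
#       return "Hello {0}{1}".format(name, punctuation)
#
#   if __name__ == "__main__":
#       greet.interface.cli()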
class HTTP(Interface):
"""Defines the interface responsible for wrapping functions and exposing them via HTTP based on the route"""
__slots__ = (
"_params_for_outputs_state",
"_params_for_invalid_outputs_state",
"_params_for_transform_state",
"_params_for_on_invalid",
"set_status",
"response_headers",
"transform",
"input_transformations",
"examples",
"wrapped",
"catch_exceptions",
"parse_body",
"private",
"on_invalid",
"inputs",
)
AUTO_INCLUDE = {"request", "response"}
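    # Illustrative only, assuming the public hug API (hug.get, hug.types): HTTP
    # interfaces are normally created through hug's HTTP routers rather than
    # instantiated directly:
    #
    #   @hug.get("/hello", versions=1)
    #   def hello(name: hug.types.text = "world"):
    #       return {"hello": name}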
def __init__(self, route, function, catch_exceptions=True):
super().__init__(route, function)
self.catch_exceptions = catch_exceptions
self.parse_body = "parse_body" in route
self.set_status = route.get("status", False)
self.response_headers = tuple(route.get("response_headers", {}).items())
self.private = "private" in route
self.inputs = route.get("inputs", {})
if "on_invalid" in route:
self._params_for_on_invalid = introspect.takes_arguments(
self.on_invalid, *self.AUTO_INCLUDE
)
elif self.transform:
self._params_for_on_invalid = self._params_for_transform
self.api.http.versions.update(route.get("versions", (None,)))
self.interface.http = self
@property
def _params_for_outputs(self):
if not hasattr(self, "_params_for_outputs_state"):
self._params_for_outputs_state = introspect.takes_arguments(
self.outputs, *self.AUTO_INCLUDE
)
return self._params_for_outputs_state
@property
def _params_for_invalid_outputs(self):
if not hasattr(self, "_params_for_invalid_outputs_state"):
self._params_for_invalid_outputs_state = introspect.takes_arguments(
self.invalid_outputs, *self.AUTO_INCLUDE
)
return self._params_for_invalid_outputs_state
@property
def _params_for_transform(self):
if not hasattr(self, "_params_for_transform_state"):
self._params_for_transform_state = introspect.takes_arguments(
self.transform, *self.AUTO_INCLUDE
)
return self._params_for_transform_state
def gather_parameters(self, request, response, context, api_version=None, **input_parameters):
"""Gathers and returns all parameters that will be used for this endpoint"""
input_parameters.update(request.params)
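        # Parse the request body with the input format negotiated from the
        # Content-Type header (falling back to the API-wide default) and merge
        # dict-shaped bodies into the keyword parameters.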
if self.parse_body and request.content_length:
body = request.bounded_stream
content_type, content_params = parse_content_type(request.content_type)
body_formatter = body and self.inputs.get(
content_type, self.api.http.input_format(content_type)
)
if body_formatter:
body = body_formatter(body, content_length=request.content_length, **content_params)
if "body" in self.all_parameters:
input_parameters["body"] = body
if isinstance(body, dict):
input_parameters.update(body)
elif "body" in self.all_parameters:
input_parameters["body"] = None
if "request" in self.all_parameters:
input_parameters["request"] = request
if "response" in self.all_parameters:
input_parameters["response"] = response
if "api_version" in self.all_parameters:
input_parameters["api_version"] = api_version
for parameter, directive in self.directives.items():
arguments = (self.defaults[parameter],) if parameter in self.defaults else ()
input_parameters[parameter] = directive(
*arguments,
response=response,
request=request,
api=self.api,
api_version=api_version,
context=context,
interface=self
)
return input_parameters
@property
def outputs(self):
return getattr(self, "_outputs", self.api.http.output_format)
@outputs.setter
def outputs(self, outputs):
self._outputs = outputs
    def transform_data(self, data, request=None, response=None, context=None):
        """Runs the transforms specified on this endpoint with the provided data, returning the data modified"""
        transform = self.transform
        if hasattr(transform, "context"):
            self.transform.context = context
if transform and not (isinstance(transform, type) and isinstance(data, transform)):
if self._params_for_transform:
return transform(
data, **self._arguments(self._params_for_transform, request, response)
)
else:
return transform(data)
return data
def content_type(self, request=None, response=None):
"""Returns the content type that should be used by default for this endpoint"""
if callable(self.outputs.content_type):
return self.outputs.content_type(request=request, response=response)
else:
return self.outputs.content_type
def invalid_content_type(self, request=None, response=None):
"""Returns the content type that should be used by default on validation errors"""
if callable(self.invalid_outputs.content_type):
return self.invalid_outputs.content_type(request=request, response=response)
else:
return self.invalid_outputs.content_type
def _arguments(self, requested_params, request=None, response=None):
if requested_params:
arguments = {}
if "response" in requested_params:
arguments["response"] = response
if "request" in requested_params:
arguments["request"] = request
return arguments
return empty.dict
def set_response_defaults(self, response, request=None):
"""Sets up the response defaults that are defined in the URL route"""
for header_name, header_value in self.response_headers:
response.set_header(header_name, header_value)
if self.set_status:
response.status = self.set_status
response.content_type = self.content_type(request, response)
def render_errors(self, errors, request, response):
data = {"errors": errors}
if getattr(self, "on_invalid", False):
data = self.on_invalid(
data, **self._arguments(self._params_for_on_invalid, request, response)
)
response.status = HTTP_BAD_REQUEST
if getattr(self, "invalid_outputs", False):
response.content_type = self.invalid_content_type(request, response)
response.data = self.invalid_outputs(
data, **self._arguments(self._params_for_invalid_outputs, request, response)
)
else:
response.data = self.outputs(
data, **self._arguments(self._params_for_outputs, request, response)
)
def call_function(self, parameters):
if not self.interface.takes_kwargs:
parameters = {
key: value for key, value in parameters.items() if key in self.all_parameters
}
self._rewrite_params(parameters)
return self.interface(**parameters)
def render_content(self, content, context, request, response, **kwargs):
if hasattr(content, "interface") and (
content.interface is True or hasattr(content.interface, "http")
):
if content.interface is True:
content(request, response, api_version=None, **kwargs)
else:
content.interface.http(request, response, api_version=None, **kwargs)
return
content = self.transform_data(content, request, response, context)
content = self.outputs(
content, **self._arguments(self._params_for_outputs, request, response)
)
if hasattr(content, "read"):
size = None
if hasattr(content, "name") and os.path.isfile(content.name):
size = os.path.getsize(content.name)
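            # Serve partial content for HTTP Range requests when the output is a
            # real file of known size; a negative end offset is interpreted
            # relative to the file size.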
if request.range and size:
start, end = request.range
if end < 0:
end = size + end
end = min(end, size)
length = end - start + 1
content.seek(start)
response.data = content.read(length)
response.status = falcon.HTTP_206
response.content_range = (start, end, size)
content.close()
else:
if size:
response.set_stream(content, size)
else:
response.stream = content # pragma: no cover
else:
response.data = content
    def __call__(self, request, response, api_version=None, **kwargs):
        """Call the wrapped function over HTTP pulling information as needed"""
        context = self.api.context_factory(
            response=response,
            request=request,
            api=self.api,
            api_version=api_version,
            interface=self,
        )
if isinstance(api_version, str) and api_version.isdigit():
api_version = int(api_version)
else:
api_version = None
if not self.catch_exceptions:
exception_types = ()
else:
exception_types = self.api.http.exception_handlers(api_version)
exception_types = tuple(exception_types.keys()) if exception_types else ()
input_parameters = {}
try:
self.set_response_defaults(response, request)
lacks_requirement = self.check_requirements(request, response, context)
if lacks_requirement:
response.data = self.outputs(
lacks_requirement,
**self._arguments(self._params_for_outputs, request, response)
)
self.api.delete_context(context, lacks_requirement=lacks_requirement)
return
input_parameters = self.gather_parameters(
request, response, context, api_version, **kwargs
)
errors = self.validate(input_parameters, context)
if errors:
self.api.delete_context(context, errors=errors)
return self.render_errors(errors, request, response)
self.render_content(
self.call_function(input_parameters), context, request, response, **kwargs
)
except falcon.HTTPNotFound as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
return self.api.http.not_found(request, response, **kwargs)
except exception_types as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
handler = None
exception_type = type(exception)
if exception_type in exception_types:
handler = self.api.http.exception_handlers(api_version)[exception_type][0]
else:
for match_exception_type, exception_handlers in tuple(
self.api.http.exception_handlers(api_version).items()
)[::-1]:
if isinstance(exception, match_exception_type):
for potential_handler in exception_handlers:
if not isinstance(exception, potential_handler.exclude):
handler = potential_handler
if not handler:
raise exception
handler(request=request, response=response, exception=exception, **kwargs)
except Exception as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(input_parameters)
self.api.delete_context(context)
def documentation(self, add_to=None, version=None, prefix="", base_url="", url=""):
"""Returns the documentation specific to an HTTP interface"""
doc = OrderedDict() if add_to is None else add_to
usage = self.interface.spec.__doc__
if usage:
doc["usage"] = usage
for example in self.examples:
example_text = "{0}{1}{2}{3}".format(
prefix, base_url, "/v{0}".format(version) if version else "", url
)
if isinstance(example, str):
example_text += "?{0}".format(example)
doc_examples = doc.setdefault("examples", [])
            if example_text not in doc_examples:
doc_examples.append(example_text)
doc = super().documentation(doc)
if getattr(self, "output_doc", ""):
doc["outputs"]["type"] = self.output_doc
return doc
@lru_cache()
def urls(self, version=None):
"""Returns all URLS that are mapped to this interface"""
urls = []
for _base_url, routes in self.api.http.routes.items():
for url, methods in routes.items():
for _method, versions in methods.items():
for interface_version, interface in versions.items():
if interface_version == version and interface == self:
                            if url not in urls:
urls.append(("/v{0}".format(version) if version else "") + url)
return urls
def url(self, version=None, **kwargs):
"""Returns the first matching URL found for the specified arguments"""
for url in self.urls(version):
            if [key for key in kwargs.keys() if "{" + key + "}" not in url]:
continue
return url.format(**kwargs)
raise KeyError("URL that takes all provided parameters not found")
class ExceptionRaised(HTTP):
"""Defines the interface responsible for taking and transforming exceptions that occur during processing"""
__slots__ = ("handle", "exclude")
def __init__(self, route, *args, **kwargs):
self.handle = route["exceptions"]
self.exclude = route["exclude"]
super().__init__(route, *args, **kwargs)
|
mit
| 2,164,024,948,178,600,700
| 39.623016
| 120
| 0.582275
| false
| 4.577753
| false
| false
| false
|
jmbowman/media_library
|
settings.py
|
1
|
3402
|
# Django settings for app project.
import os
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Use the new support for timezones in Django 1.4
USE_TZ = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.dirname(__file__) + '/files/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/files/'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/library/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'urls'
# URL for static media files
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.static',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.dirname(__file__) + "/templates",
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'uni_form',
'library',
'debug_toolbar',
)
# Directories outside of individual apps in which static files are found
STATICFILES_DIRS = (
os.path.dirname(__file__) + '/static/',
)
# destination path for static media files on file server
STATIC_ROOT = '/local/static/library/static/'
# Needed for django-debug-toolbar
INTERNAL_IPS = ('127.0.0.1',)
# Finally, grab local settings from your local settings
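# (A local_settings.py might, for example, define DEBUG, SECRET_KEY and
# DATABASES for the machine this runs on; none of those are set in this file.)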
try:
from local_settings import *
except ImportError:
pass
|
mit
| -8,985,655,191,693,617,000
| 29.927273
| 88
| 0.727513
| false
| 3.650215
| false
| false
| false
|
micha-shepher/oervoer-wizard
|
oervoer-django/oervoer/src/oervoer/settings.py
|
1
|
2765
|
"""
Django settings for oervoer project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
bojan: 06 21543084
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 're78rq!(q%1zvygez@83+9wu+$ew$!hy(v&)4_wkctte-qhyhe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_tables2',
'wizard',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'oervoer.urls'
WSGI_APPLICATION = 'oervoer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'oervoer.db'),
}
}
#TEMPLATE_CONTEXT_PROCESSORS =
#(
# 'django.core.context_processors.request',
#)
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
tcp = list(TEMPLATE_CONTEXT_PROCESSORS)
tcp.append('django.core.context_processors.request')
TEMPLATE_CONTEXT_PROCESSORS = tuple(tcp)
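# Appending the request context processor to Django's defaults makes the
# current request available in templates.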
#TEMPLATE_CONTEXT_PROCESSORS =("django.contrib.auth.context_processors.auth",
#"django.core.context_processors.debug",
#"django.core.context_processors.i18n",
#"django.core.context_processors.media",
#"django.core.context_processors.static",
#"django.core.context_processors.tz",
#"django.contrib.messages.context_processors.messages",
#'django.core.context_processors.request',)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
gpl-3.0
| -1,681,099,818,332,427,000
| 26.376238
| 77
| 0.739241
| false
| 3.264463
| false
| false
| false
|
jamescallmebrent/dagny
|
src/dagny/renderer.py
|
1
|
9212
|
# -*- coding: utf-8 -*-
from functools import wraps
import odict
from dagny import conneg
class Skip(Exception):
"""
Move on to the next renderer backend.
This exception can be raised by a renderer backend to instruct the
`Renderer` to ignore the current backend and move on to the next-best one.
"""
class Renderer(object):
"""
Manage a collection of renderer backends, and their execution on an action.
A renderer backend is a callable which accepts an `Action` and a `Resource`
and returns an instance of `django.http.HttpResponse`. For example:
>>> def render_html(action, resource):
... from django.http import HttpResponse
... return HttpResponse(content="<html>...</html>")
Backends are associated with mimetypes on the `Renderer`, through mimetype
shortcodes (see `dagny.conneg` for more information on shortcodes). The
`Renderer` exports a dictionary-like interface for managing these
associations:
>>> r = Renderer()
>>> r['html'] = render_html
>>> r['html'] # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> 'html' in r
True
>>> del r['html']
>>> r['html']
Traceback (most recent call last):
...
KeyError: 'html'
>>> 'html' in r
False
A few helpful dictionary methods have also been added, albeit
underscore-prefixed to prevent naming clashes. Behind the scenes, `Renderer`
uses [odict](http://pypi.python.org/pypi/odict), which will keep the keys in
the order they were *first* defined. Here are a few examples:
>>> r['html'] = 1
>>> r['json'] = 2
>>> r['xml'] = 3
>>> r._keys()
['html', 'json', 'xml']
>>> r._items()
[('html', 1), ('json', 2), ('xml', 3)]
>>> r._values()
[1, 2, 3]
This order preservation is useful for ConNeg, since you can define backends
in order of server preference and the negotiator will consider them
appropriately. You can push something to the end of the queue by removing
and then re-adding it:
>>> r['html'] = r._pop('html')
>>> r._keys()
['json', 'xml', 'html']
You can also define backends using a handy decorator-based syntax:
>>> @r.html
... def render_html_2(action, resource):
... from django.http import HttpResponse
... return HttpResponse(content="<html>...</html>")
>>> r['html'] is render_html_2
True
Remember that your shortcode *must* be pre-registered with
`dagny.conneg.MIMETYPES` for this to work, otherwise an `AttributeError`
will be raised. This also introduces the constraint that your shortcode must
be a valid Python identifier.
"""
def __init__(self, backends=None):
if backends is None:
backends = odict.odict()
else:
backends = backends.copy()
self._backends = backends
def __getattr__(self, shortcode):
"""
Support use of decorator syntax to define new renderer backends.
>>> r = Renderer()
>>> @r.html
... def render_html(action, resource):
... return "<html>...</html>"
>>> render_html # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> r['html'] # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> r['html'] is render_html
True
"""
if shortcode not in conneg.MIMETYPES:
raise AttributeError(shortcode)
def decorate(function):
self[shortcode] = function
return function
return decorate
def __call__(self, action, resource):
matches = self._match(action, resource)
for shortcode in matches:
try:
return self[shortcode](action, resource)
except Skip:
continue
return not_acceptable(action, resource)
def _match(self, action, resource):
"""Return all matching shortcodes for a given action and resource."""
matches = []
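        # Matching order: an explicit format override from the resource wins,
        # then acceptable types from the HTTP Accept header, then 'html' as a
        # fallback when it is registered and nothing else matched.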
format_override = resource._format()
if format_override and (format_override in self._keys()):
matches.append(format_override)
accept_header = resource.request.META.get('HTTP_ACCEPT')
if accept_header:
matches.extend(conneg.match_accept(accept_header, self._keys()))
if (not matches) and ('html' in self):
matches.append('html')
return matches
def _bind(self, action):
"""
Bind this `Renderer` to an action, returning a `BoundRenderer`.
>>> r = Renderer()
>>> action = object()
>>> r['html'] = 1
>>> br = r._bind(action)
>>> br # doctest: +ELLIPSIS
<BoundRenderer on <object object at 0x...>>
Associations should be preserved, albeit on a copied `odict`, so that
        modifications to the `BoundRenderer` do not propagate back to this `Renderer`.
>>> br['html']
1
>>> br['html'] = 2
>>> br['html']
2
>>> r['html']
1
>>> r['html'] = 3
>>> r['html']
3
>>> br['html']
2
"""
return BoundRenderer(action, backends=self._backends)
def _copy(self):
return type(self)(backends=self._backends)
### <meta>
#
# This chunk of code creates several proxy methods going through to
# `_backends`. A group of them are underscore-prefixed to prevent naming
# clashes with the `__getattr__`-based decorator syntax (so you could
# still associate a backend with a shortcode of 'pop', for example).
proxy = lambda meth: property(lambda self: getattr(self._backends, meth))
for method in ('__contains__', '__getitem__', '__setitem__', '__delitem__'):
vars()[method] = proxy(method)
for method in ('clear', 'get', 'items', 'iteritems', 'iterkeys',
'itervalues', 'keys', 'pop', 'popitem', 'ritems',
'riteritems', 'riterkeys', 'ritervalues', 'rkeys', 'rvalues',
'setdefault', 'sort', 'update', 'values'):
vars()['_' + method] = proxy(method)
_dict = proxy('as_dict')
del method, proxy
#
### </meta>
class BoundRenderer(Renderer):
def __init__(self, action, backends=None):
super(BoundRenderer, self).__init__(backends=backends)
self._action = action
def __repr__(self):
return "<BoundRenderer on %r>" % (self._action,)
def __getattr__(self, shortcode):
"""
Support use of decorator syntax to define new renderer backends.
In this case, decorated functions should be methods which operate on a
resource, and take no other arguments.
>>> action = object()
>>> r = BoundRenderer(action)
>>> old_action_id = id(action)
>>> @r.html
... def action(resource):
... return "<html>...</html>"
>>> id(action) == old_action_id # Object has not changed.
True
        Functions will be wrapped internally, so that their function signature
        is that of a generic renderer backend.
>>> resource = object()
>>> r['html'](action, resource)
'<html>...</html>'
"""
if shortcode not in conneg.MIMETYPES:
raise AttributeError(shortcode)
def decorate(method):
self[shortcode] = resource_method_wrapper(method)
return self._action
return decorate
def __call__(self, resource):
return super(BoundRenderer, self).__call__(self._action, resource)
def resource_method_wrapper(method):
"""
Wrap a 0-ary resource method as a generic renderer backend.
>>> @resource_method_wrapper
... def func(resource):
... print repr(resource)
>>> action = "abc"
>>> resource = "def"
>>> func(action, resource)
'def'
"""
def generic_renderer_backend(action, resource):
return method(resource)
return generic_renderer_backend
def not_acceptable(action, resource):
"""Respond, indicating that no acceptable entity could be generated."""
from django.http import HttpResponse
response = HttpResponse(status=406) # Not Acceptable
del response['Content-Type']
return response
|
unlicense
| 6,525,417,132,887,821,000
| 29.006515
| 80
| 0.530504
| false
| 4.709611
| false
| false
| false
|
gaapt/deepdive
|
src/test/resources/spouse/udf/ext_has_spouse.py
|
1
|
1973
|
#! /usr/bin/env python
import csv
import os
import sys
from collections import defaultdict
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Load the spouse dictionary for distant supervision
spouses = defaultdict(lambda: None)
with open (BASE_DIR + "/../data/spouses.csv") as csvfile:
reader = csv.reader(csvfile)
for line in reader:
spouses[line[0].strip().lower()] = line[1].strip().lower()
# Load relations of people that are not spouses
non_spouses = set()
lines = open(BASE_DIR + '/../data/non-spouses.tsv').readlines()
for line in lines:
name1, name2, relation = line.strip().split('\t')
non_spouses.add((name1, name2)) # Add a non-spouse relation pair
# For each input tuple
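# Each row is expected to be a tab-separated line with the fields
# sentence_id, p1_id, p1_text, p2_id, p2_text (example values are illustrative):
#   118238@10  118238@10_7  Barack Obama  118238@10_1  Michelle Obama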
for row in sys.stdin:
parts = row.strip().split('\t')
if len(parts) != 5:
print >>sys.stderr, 'Failed to parse row:', row
continue
sentence_id, p1_id, p1_text, p2_id, p2_text = parts
p1_text = p1_text.strip()
p2_text = p2_text.strip()
p1_text_lower = p1_text.lower()
p2_text_lower = p2_text.lower()
# See if the combination of people is in our supervision dictionary
# If so, set is_correct to true or false
is_true = '\N'
if spouses[p1_text_lower] == p2_text_lower:
is_true = '1'
if spouses[p2_text_lower] == p1_text_lower:
is_true = '1'
elif (p1_text == p2_text) or (p1_text in p2_text) or (p2_text in p1_text):
is_true = '0'
elif (p1_text_lower, p2_text_lower) in non_spouses:
is_true = '0'
elif (p2_text_lower, p1_text_lower) in non_spouses:
is_true = '0'
print '\t'.join([
p1_id, p2_id, sentence_id,
"%s-%s" %(p1_text, p2_text),
is_true,
"%s-%s" %(p1_id, p2_id),
'\N' # leave "id" blank for system!
])
# TABLE FORMAT: CREATE TABLE has_spouse(
# person1_id bigint,
# person2_id bigint,
# sentence_id bigint,
# description text,
# is_true boolean,
# relation_id bigint, -- unique identifier for has_spouse
# id bigint -- reserved for DeepDive
# );
|
apache-2.0
| -6,167,275,737,590,914,000
| 28.014706
| 76
| 0.642169
| false
| 2.802557
| false
| false
| false
|
google-research-datasets/natural-questions
|
eval_utils.py
|
1
|
10966
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for nq evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
from gzip import GzipFile
import json
import multiprocessing
from absl import flags
from absl import logging
flags.DEFINE_integer(
'long_non_null_threshold', 2,
'Require this many non-null long answer annotations '
'to count gold as containing a long answer.')
flags.DEFINE_integer(
'short_non_null_threshold', 2,
'Require this many non-null short answer annotations '
'to count gold as containing a short answer.')
FLAGS = flags.FLAGS
# A data structure for storing prediction and annotation.
# When a example has multiple annotations, multiple NQLabel will be used.
NQLabel = collections.namedtuple(
'NQLabel',
[
'example_id', # the unique id for each NQ example.
'long_answer_span', # A Span object for long answer.
'short_answer_span_list', # A list of Spans for short answer.
        # Note that in NQ, the short answers
# do not need to be in a single span.
        'yes_no_answer', # Indicate if the short answer is a yes/no answer
# The possible values are "yes", "no", "none".
# (case insensitive)
# If the field is "yes", short_answer_span_list
# should be empty or only contain null spans.
'long_score', # The prediction score for the long answer prediction.
'short_score' # The prediction score for the short answer prediction.
])
class Span(object):
"""A class for handling token and byte spans.
The logic is:
1) if both start_byte != -1 and end_byte != -1 then the span is defined
by byte offsets
  2) else, if start_token != -1 and end_token != -1 then the span is defined
by token offsets
3) else, this is a null span.
  A null span means that there is no (long or short) answer.
  If your system only cares about token spans rather than byte spans, set all
byte spans to -1.
"""
def __init__(self, start_byte, end_byte, start_token_idx, end_token_idx):
if ((start_byte < 0 and end_byte >= 0) or
(start_byte >= 0 and end_byte < 0)):
raise ValueError('Inconsistent Null Spans (Byte).')
if ((start_token_idx < 0 and end_token_idx >= 0) or
(start_token_idx >= 0 and end_token_idx < 0)):
raise ValueError('Inconsistent Null Spans (Token).')
if start_byte >= 0 and end_byte >= 0 and start_byte >= end_byte:
raise ValueError('Invalid byte spans (start_byte >= end_byte).')
if ((start_token_idx >= 0 and end_token_idx >= 0) and
(start_token_idx >= end_token_idx)):
raise ValueError('Invalid token spans (start_token_idx >= end_token_idx)')
self.start_byte = start_byte
self.end_byte = end_byte
self.start_token_idx = start_token_idx
self.end_token_idx = end_token_idx
def is_null_span(self):
"""A span is a null span if the start and end are both -1."""
if (self.start_byte < 0 and self.end_byte < 0 and
self.start_token_idx < 0 and self.end_token_idx < 0):
return True
return False
def __str__(self):
byte_str = 'byte: [' + str(self.start_byte) + ',' + str(self.end_byte) + ')'
tok_str = ('tok: [' + str(self.start_token_idx) + ',' +
str(self.end_token_idx) + ')')
return byte_str + ' ' + tok_str
def __repr__(self):
return self.__str__()
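# Illustrative examples (not part of the original module):
# Span(10, 20, -1, -1) is defined by byte offsets,
# Span(-1, -1, 3, 7) is defined by token offsets, and
# Span(-1, -1, -1, -1).is_null_span() is True.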
def is_null_span_list(span_list):
"""Returns true iff all spans in span_list are null or span_list is empty."""
if not span_list or all([span.is_null_span() for span in span_list]):
return True
return False
def nonnull_span_equal(span_a, span_b):
"""Given two spans, return if they are equal.
Args:
span_a: a Span object.
    span_b: a Span object. Only non-null spans are compared. First, if the byte
      offsets are not negative, compare byte offsets; otherwise, compare token offsets.
Returns:
True or False
"""
assert isinstance(span_a, Span)
assert isinstance(span_b, Span)
assert not span_a.is_null_span()
assert not span_b.is_null_span()
# if byte offsets are not negative, compare byte offsets
if ((span_a.start_byte >= 0 and span_a.end_byte >= 0) and
(span_b.start_byte >= 0 and span_b.end_byte >= 0)):
if ((span_a.start_byte == span_b.start_byte) and
(span_a.end_byte == span_b.end_byte)):
return True
# if token offsets are not negative, compare token offsets
if ((span_a.start_token_idx >= 0 and span_a.end_token_idx >= 0) and
(span_b.start_token_idx >= 0 and span_b.end_token_idx >= 0)):
if ((span_a.start_token_idx == span_b.start_token_idx) and
(span_a.end_token_idx == span_b.end_token_idx)):
return True
return False
def span_set_equal(gold_span_list, pred_span_list):
  """Checks whether the two span sets are completely equal, ignoring null spans."""
gold_span_list = [span for span in gold_span_list if not span.is_null_span()]
pred_span_list = [span for span in pred_span_list if not span.is_null_span()]
for pspan in pred_span_list:
# not finding pspan equal to any spans in gold_span_list
if not any([nonnull_span_equal(pspan, gspan) for gspan in gold_span_list]):
return False
for gspan in gold_span_list:
# not finding gspan equal to any spans in pred_span_list
if not any([nonnull_span_equal(pspan, gspan) for pspan in pred_span_list]):
return False
return True
def gold_has_short_answer(gold_label_list):
  """Gets votes from multiple annotators to judge if there is a short answer."""
  # We consider that there is a short answer if there is a short answer span or
# the yes/no answer is not none.
gold_has_answer = gold_label_list and sum([
((not is_null_span_list(label.short_answer_span_list)) or
(label.yes_no_answer != 'none')) for label in gold_label_list
]) >= FLAGS.short_non_null_threshold
return gold_has_answer
def gold_has_long_answer(gold_label_list):
  """Gets votes from multiple annotators to judge if there is a long answer."""
gold_has_answer = gold_label_list and (sum([
not label.long_answer_span.is_null_span() # long answer not null
for label in gold_label_list # for each annotator
]) >= FLAGS.long_non_null_threshold)
return gold_has_answer
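# For example, with the default threshold of 2, an example whose annotators
# gave long answers [null, null, span, span, null] counts as having a gold
# long answer, while [null, null, null, null, span] does not.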
def read_prediction_json(predictions_path):
"""Read the prediction json with scores.
Args:
predictions_path: the path for the prediction json.
Returns:
A dictionary with key = example_id, value = NQInstancePrediction.
"""
logging.info('Reading predictions from file: %s', format(predictions_path))
with open(predictions_path, 'r') as f:
predictions = json.loads(f.read())
nq_pred_dict = {}
for single_prediction in predictions['predictions']:
if 'long_answer' in single_prediction:
long_span = Span(single_prediction['long_answer']['start_byte'],
single_prediction['long_answer']['end_byte'],
single_prediction['long_answer']['start_token'],
single_prediction['long_answer']['end_token'])
else:
      long_span = Span(-1, -1, -1, -1) # Span is null if not present.
short_span_list = []
if 'short_answers' in single_prediction:
for short_item in single_prediction['short_answers']:
short_span_list.append(
Span(short_item['start_byte'], short_item['end_byte'],
short_item['start_token'], short_item['end_token']))
yes_no_answer = 'none'
if 'yes_no_answer' in single_prediction:
yes_no_answer = single_prediction['yes_no_answer'].lower()
if yes_no_answer not in ['yes', 'no', 'none']:
raise ValueError('Invalid yes_no_answer value in prediction')
if yes_no_answer != 'none' and not is_null_span_list(short_span_list):
raise ValueError('yes/no prediction and short answers cannot coexist.')
pred_item = NQLabel(
example_id=single_prediction['example_id'],
long_answer_span=long_span,
short_answer_span_list=short_span_list,
yes_no_answer=yes_no_answer,
long_score=single_prediction['long_answer_score'],
short_score=single_prediction['short_answers_score'])
nq_pred_dict[single_prediction['example_id']] = pred_item
return nq_pred_dict
def read_annotation_from_one_split(gzipped_input_file):
"""Read annotation from one split of file."""
if isinstance(gzipped_input_file, str):
gzipped_input_file = open(gzipped_input_file, 'rb')
logging.info('parsing %s ..... ', gzipped_input_file.name)
annotation_dict = {}
with GzipFile(fileobj=gzipped_input_file) as input_file:
for line in input_file:
json_example = json.loads(line)
example_id = json_example['example_id']
# There are multiple annotations for one nq example.
annotation_list = []
for annotation in json_example['annotations']:
long_span_rec = annotation['long_answer']
long_span = Span(long_span_rec['start_byte'], long_span_rec['end_byte'],
long_span_rec['start_token'],
long_span_rec['end_token'])
short_span_list = []
for short_span_rec in annotation['short_answers']:
short_span = Span(short_span_rec['start_byte'],
short_span_rec['end_byte'],
short_span_rec['start_token'],
short_span_rec['end_token'])
short_span_list.append(short_span)
gold_label = NQLabel(
example_id=example_id,
long_answer_span=long_span,
short_answer_span_list=short_span_list,
long_score=0,
short_score=0,
yes_no_answer=annotation['yes_no_answer'].lower())
annotation_list.append(gold_label)
annotation_dict[example_id] = annotation_list
return annotation_dict
def read_annotation(path_name, n_threads=10):
"""Read annotations with real multiple processes."""
input_paths = glob.glob(path_name)
pool = multiprocessing.Pool(n_threads)
try:
dict_list = pool.map(read_annotation_from_one_split, input_paths)
finally:
pool.close()
pool.join()
final_dict = {}
for single_dict in dict_list:
final_dict.update(single_dict)
return final_dict
|
apache-2.0
| 4,003,235,773,756,111,400
| 34.035144
| 80
| 0.648915
| false
| 3.546572
| false
| false
| false
|
Azure/azure-sdk-for-python
|
setup.py
|
1
|
2261
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from __future__ import print_function
import os.path
import glob
import copy
import sys
import runpy
root_folder = os.path.abspath(os.path.dirname(__file__))
# pull in any packages that exist in the root directory
packages = {('.', os.path.dirname(p)) for p in glob.glob('azure*/setup.py')}
# Handle the SDK folder as well
packages.update({tuple(os.path.dirname(f).rsplit(os.sep, 1)) for f in glob.glob('sdk/*/azure*/setup.py')})
# [(base_folder, package_name), ...] to {package_name: base_folder, ...}
packages = {package_name: base_folder for (base_folder, package_name) in packages}
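# e.g. (illustrative) packages == {'azure-common': 'sdk/core', 'azure-mgmt-compute': 'sdk/compute', ...}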
# Extract nspkg and sort nspkg by number of "-"
nspkg_packages = [p for p in packages.keys() if "nspkg" in p]
nspkg_packages.sort(key = lambda x: len([c for c in x if c == '-']))
# Meta-packages to ignore
meta_package = ['azure-keyvault', 'azure-mgmt', 'azure', 'azure-storage']
# content packages are packages that are not meta nor nspkg
content_package = sorted([p for p in packages.keys() if p not in meta_package+nspkg_packages])
# Move azure-common to the beginning; it's important this goes first
content_package.remove("azure-common")
content_package.insert(0, "azure-common")
# Package final:
if "install" in sys.argv:
packages_for_installation = content_package
else:
packages_for_installation = nspkg_packages + content_package
for pkg_name in packages_for_installation:
pkg_setup_folder = os.path.join(root_folder, packages[pkg_name], pkg_name)
pkg_setup_path = os.path.join(pkg_setup_folder, 'setup.py')
try:
saved_dir = os.getcwd()
saved_syspath = sys.path
os.chdir(pkg_setup_folder)
sys.path = [pkg_setup_folder] + copy.copy(saved_syspath)
print("Start ", pkg_setup_path)
result = runpy.run_path(pkg_setup_path)
except Exception as e:
print(e, file=sys.stderr)
finally:
os.chdir(saved_dir)
sys.path = saved_syspath
|
mit
| -1,612,231,462,446,572,300
| 35.435484
| 106
| 0.648074
| false
| 3.513219
| false
| false
| false
|
defeo/cypari2
|
autogen/doc.py
|
1
|
10642
|
# -*- coding: utf-8 -*-
"""
Handle PARI documentation
"""
from __future__ import unicode_literals
import re
import subprocess
leading_ws = re.compile("^( +)", re.MULTILINE)
trailing_ws = re.compile("( +)$", re.MULTILINE)
double_space = re.compile(" +")
end_space = re.compile(r"(@\[end[a-z]*\])([A-Za-z])")
end_paren = re.compile(r"(@\[end[a-z]*\])([(])")
begin_verb = re.compile(r"@1")
end_verb = re.compile(r"@[23] *@\[endcode\]")
verb_loop = re.compile(r"^( .*)@\[[a-z]*\]", re.MULTILINE)
dollars = re.compile(r"@\[dollar\]\s*(.*?)\s*@\[dollar\]", re.DOTALL)
doubledollars = re.compile(r"@\[doubledollar\]\s*(.*?)\s*@\[doubledollar\] *", re.DOTALL)
math_loop = re.compile(r"(@\[start[A-Z]*MATH\][^@]*)@\[[a-z]*\]")
math_backslash = re.compile(r"(@\[start[A-Z]*MATH\][^@]*)=BACKSLASH=")
prototype = re.compile("^[^\n]*\n\n")
library_syntax = re.compile("The library syntax is.*", re.DOTALL)
newlines = re.compile("\n\n\n\n*")
bullet_loop = re.compile("(@BULLET( [^\n]*\n)*)([^ \n])")
indent_math = re.compile("(@\\[startDISPLAYMATH\\].*\n(.+\n)*)(\\S)")
escape_backslash = re.compile(r"^(\S.*)[\\]", re.MULTILINE)
escape_mid = re.compile(r"^(\S.*)[|]", re.MULTILINE)
escape_percent = re.compile(r"^(\S.*)[%]", re.MULTILINE)
escape_hash = re.compile(r"^(\S.*)[#]", re.MULTILINE)
label_define = re.compile(r"@\[label [a-zA-Z0-9:]*\]")
label_ref = re.compile(r"(Section *)?@\[startref\](se:)?([^@]*)@\[endref\]")
def sub_loop(regex, repl, text):
"""
In ``text``, substitute ``regex`` by ``repl`` recursively. As long
as substitution is possible, ``regex`` is substituted.
INPUT:
- ``regex`` -- a compiled regular expression
- ``repl`` -- replacement text
- ``text`` -- input text
OUTPUT: substituted text
EXAMPLES:
    Ensure there is a space between any 2 letters ``x``::
>>> from autogen.doc import sub_loop
>>> import re
>>> print(sub_loop(re.compile("xx"), "x x", "xxx_xx"))
x x x_x x
"""
while True:
text, n = regex.subn(repl, text)
if not n:
return text
def raw_to_rest(doc):
r"""
Convert raw PARI documentation (with ``@``-codes) to reST syntax.
INPUT:
- ``doc`` -- the raw PARI documentation
OUTPUT: a unicode string
EXAMPLES::
>>> from autogen.doc import raw_to_rest
>>> print(raw_to_rest(b"@[startbold]hello world@[endbold]"))
:strong:`hello world`
TESTS::
>>> raw_to_rest(b"@[invalid]")
Traceback (most recent call last):
...
SyntaxError: @ found: @[invalid]
>>> s = b'@3@[startbold]*@[endbold] snip @[dollar]0@[dollar]\ndividing @[dollar]#E@[dollar].'
>>> print(raw_to_rest(s))
- snip :math:`0`
dividing :math:`\#E`.
"""
doc = doc.decode("utf-8")
# Work around a specific problem with doc of "component"
doc = doc.replace("[@[dollar]@[dollar]]", "[]")
# Work around a specific problem with doc of "algdivl"
doc = doc.replace(r"\y@", r"\backslash y@")
# Special characters
doc = doc.replace("@[lt]", "<")
doc = doc.replace("@[gt]", ">")
doc = doc.replace("@[pm]", "±")
doc = doc.replace("@[nbrk]", "\xa0")
doc = doc.replace("@[agrave]", "à")
doc = doc.replace("@[aacute]", "á")
doc = doc.replace("@[eacute]", "é")
doc = doc.replace("@[ouml]", "ö")
doc = doc.replace("@[uuml]", "ü")
doc = doc.replace("\\'{a}", "á")
# Remove leading and trailing whitespace from every line
doc = leading_ws.sub("", doc)
doc = trailing_ws.sub("", doc)
# Remove multiple spaces
doc = double_space.sub(" ", doc)
# Sphinx dislikes inline markup immediately followed by a letter:
# insert a non-breaking space
doc = end_space.sub("\\1\xa0\\2", doc)
# Similarly, for inline markup immediately followed by an open
# parenthesis, insert a space
doc = end_paren.sub("\\1 \\2", doc)
# Fix labels and references
doc = label_define.sub("", doc)
doc = label_ref.sub("``\\3`` (in the PARI manual)", doc)
# Bullet items
doc = doc.replace("@3@[startbold]*@[endbold] ", "@BULLET ")
doc = sub_loop(bullet_loop, "\\1 \\3", doc)
doc = doc.replace("@BULLET ", "- ")
# Add =VOID= in front of all leading whitespace (which was
# intentionally added) to avoid confusion with verbatim blocks.
doc = leading_ws.sub(r"=VOID=\1", doc)
# Verbatim blocks
doc = begin_verb.sub("::\n\n@0", doc)
doc = end_verb.sub("", doc)
doc = doc.replace("@0", " ")
doc = doc.replace("@3", "")
# Remove all further markup from within verbatim blocks
doc = sub_loop(verb_loop, "\\1", doc)
# Pair dollars -> beginmath/endmath
doc = doc.replace("@[dollar]@[dollar]", "@[doubledollar]")
doc = dollars.sub(r"@[startMATH]\1@[endMATH]", doc)
doc = doubledollars.sub(r"@[startDISPLAYMATH]\1@[endDISPLAYMATH]", doc)
# Replace special characters (except in verbatim blocks)
# \ -> =BACKSLASH=
# | -> =MID=
# % -> =PERCENT=
# # -> =HASH=
doc = sub_loop(escape_backslash, "\\1=BACKSLASH=", doc)
doc = sub_loop(escape_mid, "\\1=MID=", doc)
doc = sub_loop(escape_percent, "\\1=PERCENT=", doc)
doc = sub_loop(escape_hash, "\\1=HASH=", doc)
# Math markup
doc = doc.replace("@[obr]", "{")
doc = doc.replace("@[cbr]", "}")
doc = doc.replace("@[startword]", "\\")
doc = doc.replace("@[endword]", "")
# (special rules for Hom and Frob, see trac ticket 21005)
doc = doc.replace("@[startlword]Hom@[endlword]", "\\text{Hom}")
doc = doc.replace("@[startlword]Frob@[endlword]", "\\text{Frob}")
doc = doc.replace("@[startlword]", "\\")
doc = doc.replace("@[endlword]", "")
doc = doc.replace("@[startbi]", "\\mathbb{")
doc = doc.replace("@[endbi]", "}")
# PARI TeX macros
doc = doc.replace(r"\Cl", r"\mathrm{Cl}")
doc = doc.replace(r"\Id", r"\mathrm{Id}")
doc = doc.replace(r"\Norm", r"\mathrm{Norm}")
doc = doc.replace(r"\disc", r"\mathrm{disc}")
doc = doc.replace(r"\gcd", r"\mathrm{gcd}")
doc = doc.replace(r"\lcm", r"\mathrm{lcm}")
# Remove extra markup inside math blocks
doc = sub_loop(math_loop, "\\1", doc)
# Replace special characters by escape sequences
# Note that =BACKSLASH= becomes an unescaped backslash in math mode
# but an escaped backslash otherwise.
doc = sub_loop(math_backslash, r"\1\\", doc)
doc = doc.replace("=BACKSLASH=", r"\\")
doc = doc.replace("=MID=", r"\|")
doc = doc.replace("=PERCENT=", r"\%")
doc = doc.replace("=HASH=", r"\#")
doc = doc.replace("=VOID=", "")
# Handle DISPLAYMATH
doc = doc.replace("@[endDISPLAYMATH]", "\n\n")
doc = sub_loop(indent_math, "\\1 \\3", doc)
doc = doc.replace("@[startDISPLAYMATH]", "\n\n.. MATH::\n\n ")
# Inline markup. We do use the more verbose :foo:`text` style since
# those nest more easily.
doc = doc.replace("@[startMATH]", ":math:`")
doc = doc.replace("@[endMATH]", "`")
doc = doc.replace("@[startpodcode]", "``")
doc = doc.replace("@[endpodcode]", "``")
doc = doc.replace("@[startcode]", ":literal:`")
doc = doc.replace("@[endcode]", "`")
doc = doc.replace("@[startit]", ":emphasis:`")
doc = doc.replace("@[endit]", "`")
doc = doc.replace("@[startbold]", ":strong:`")
doc = doc.replace("@[endbold]", "`")
# Remove prototype
doc = prototype.sub("", doc)
# Remove everything starting with "The library syntax is"
# (this is not relevant for Python)
doc = library_syntax.sub("", doc)
# Allow at most 2 consecutive newlines
doc = newlines.sub("\n\n", doc)
# Strip result
doc = doc.strip()
# Ensure no more @ remains
try:
i = doc.index("@")
except ValueError:
return doc
ilow = max(0, i-30)
ihigh = min(len(doc), i+30)
raise SyntaxError("@ found: " + doc[ilow:ihigh])
def get_raw_doc(function):
r"""
Get the raw documentation of PARI function ``function``.
INPUT:
- ``function`` -- name of a PARI function
EXAMPLES::
>>> from autogen.doc import get_raw_doc
>>> print(get_raw_doc("cos").decode())
@[startbold]cos@[dollar](x)@[dollar]:@[endbold]
<BLANKLINE>
@[label se:cos]
Cosine of @[dollar]x@[dollar].
...
>>> get_raw_doc("abcde")
Traceback (most recent call last):
...
RuntimeError: no help found for 'abcde'
"""
doc = subprocess.check_output(["gphelp", "-raw", function])
if doc.endswith(b"""' not found !\n"""):
raise RuntimeError("no help found for '{}'".format(function))
return doc
def get_rest_doc(function):
r"""
Get the documentation of the PARI function ``function`` in reST
syntax.
INPUT:
- ``function`` -- name of a PARI function
EXAMPLES::
>>> from autogen.doc import get_rest_doc
>>> print(get_rest_doc("teichmuller"))
Teichmüller character of the :math:`p`-adic number :math:`x`, i.e. the unique
:math:`(p-1)`-th root of unity congruent to :math:`x / p^{v_p(x)}` modulo :math:`p`...
::
>>> print(get_rest_doc("weber"))
One of Weber's three :math:`f` functions.
If :math:`flag = 0`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f(x) = \exp (-i\pi/24).\eta ((x+1)/2)/\eta (x) {such that}
j = (f^{24}-16)^3/f^{24},
<BLANKLINE>
where :math:`j` is the elliptic :math:`j`-invariant (see the function :literal:`ellj`).
If :math:`flag = 1`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f_1(x) = \eta (x/2)/\eta (x) {such that}
j = (f_1^{24}+16)^3/f_1^{24}.
<BLANKLINE>
Finally, if :math:`flag = 2`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f_2(x) = \sqrt{2}\eta (2x)/\eta (x) {such that}
j = (f_2^{24}+16)^3/f_2^{24}.
<BLANKLINE>
Note the identities :math:`f^8 = f_1^8+f_2^8` and :math:`ff_1f_2 = \sqrt2`.
::
>>> doc = get_rest_doc("ellap") # doc depends on PARI version
::
>>> print(get_rest_doc("bitor"))
bitwise (inclusive)
:literal:`or` of two integers :math:`x` and :math:`y`, that is the integer
<BLANKLINE>
.. MATH::
<BLANKLINE>
\sum
(x_i or y_i) 2^i
<BLANKLINE>
See ``bitand`` (in the PARI manual) for the behavior for negative arguments.
"""
raw = get_raw_doc(function)
return raw_to_rest(raw)
|
gpl-2.0
| -2,136,652,580,817,424,100
| 30.178886
| 101
| 0.554552
| false
| 3.123384
| false
| false
| false
|
jalanb/dotsite
|
pysyte/cli/streams.py
|
1
|
1979
|
"""Module to handle streams of text from cli arguments"""
import os
import sys
import contextlib
from itertools import chain
from six import StringIO
from pysyte import iteration
from pysyte.cli import arguments
from pysyte.oss.platforms import get_clipboard_data
def parse_args():
"""Parse out command line arguments"""
parser = arguments.parser(__doc__)
parser.args('streams', help='streams to use')
parser.opt('-p', '--paste', 'paste text from clipboard')
parser.opt('-i', '--stdin', 'wait for text from stdin')
return parser.parse_args()
def args(parsed_args, name=None, files_only=False):
"""Interpret parsed args to streams"""
strings = parsed_args.get_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
elif files_only:
return []
else:
streams = []
if '-' in files or not files or getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
return streams
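# Typical use from a script (a sketch; assumes the 'streams' positional
# argument declared in parse_args() above):
#
#     parsed = parse_args()
#     for stream in args(parsed, 'streams'):
#         print(stream.read())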
def files(parsed_args, name=None):
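    """Like args(), but only include streams for arguments that are existing files."""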
return args(parsed_args, name, True)
def all():
yielded = False
    for path in _arg_files():
yield open(path)
yielded = True
if not yielded or '-' in sys.argv:
yield sys.stdin
def some():
if sys.argv[1:]:
assert _arg_files()
        return _any()
def clipboard_stream(name=None):
stream = StringIO(get_clipboard_data())
stream.name = name or '<clipboard>'
return stream
def _arg_files():
return [a for a in sys.argv[1:] if os.path.isfile(a)]
def _arg_streams():
"""yield streams to all arg.isfile()"""
for path in _arg_files():
yield open(path)
def _any():
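    """Streams for any file arguments, else the clipboard and stdin."""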
try:
stream = iteration.first(_arg_streams())
if stream:
return _arg_streams()
except ValueError:
return iter([clipboard_stream(), sys.stdin])
|
mit
| 2,554,933,082,323,681,300
| 23.7375
| 73
| 0.632643
| false
| 3.699065
| false
| false
| false
|
aio-libs/aiomysql
|
aiomysql/sa/connection.py
|
1
|
14895
|
# ported from:
# https://github.com/aio-libs/aiopg/blob/master/aiopg/sa/connection.py
import weakref
from sqlalchemy.sql import ClauseElement
from sqlalchemy.sql.dml import UpdateBase
from sqlalchemy.sql.ddl import DDLElement
from . import exc
from .result import create_result_proxy
from .transaction import (RootTransaction, Transaction,
NestedTransaction, TwoPhaseTransaction)
from ..utils import _TransactionContextManager, _SAConnectionContextManager
def noop(k):
return k
class SAConnection:
def __init__(self, connection, engine, compiled_cache=None):
self._connection = connection
self._transaction = None
self._savepoint_seq = 0
self._weak_results = weakref.WeakSet()
self._engine = engine
self._dialect = engine.dialect
self._compiled_cache = compiled_cache
def execute(self, query, *multiparams, **params):
"""Executes a SQL query with optional parameters.
query - a SQL query string or any sqlalchemy expression.
*multiparams/**params - represent bound parameter values to be
used in the execution. Typically, the format is a dictionary
        passed to *multiparams::
await conn.execute(
table.insert(),
{"id":1, "value":"v1"},
)
...or individual key/values interpreted by **params::
await conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, a tuple or
individual values in *multiparams may be passed::
await conn.execute(
"INSERT INTO table (id, value) VALUES (%d, %s)",
(1, "v1")
)
await conn.execute(
"INSERT INTO table (id, value) VALUES (%s, %s)",
1, "v1"
)
Returns ResultProxy instance with results of SQL query
execution.
"""
coro = self._execute(query, *multiparams, **params)
return _SAConnectionContextManager(coro)
def _base_params(self, query, dp, compiled, is_update):
"""
handle params
"""
if dp and isinstance(dp, (list, tuple)):
if is_update:
dp = {c.key: pval for c, pval in zip(query.table.c, dp)}
else:
raise exc.ArgumentError(
"Don't mix sqlalchemy SELECT "
"clause with positional "
"parameters"
)
compiled_params = compiled.construct_params(dp)
processors = compiled._bind_processors
params = [{
key: processors.get(key, noop)(compiled_params[key])
for key in compiled_params
}]
post_processed_params = self._dialect.execute_sequence_format(params)
return post_processed_params[0]
async def _executemany(self, query, dps, cursor):
"""
executemany
"""
result_map = None
if isinstance(query, str):
await cursor.executemany(query, dps)
elif isinstance(query, DDLElement):
raise exc.ArgumentError(
"Don't mix sqlalchemy DDL clause "
"and execution with parameters"
)
elif isinstance(query, ClauseElement):
compiled = query.compile(dialect=self._dialect)
params = []
is_update = isinstance(query, UpdateBase)
for dp in dps:
params.append(
self._base_params(
query,
dp,
compiled,
is_update,
)
)
await cursor.executemany(str(compiled), params)
result_map = compiled._result_columns
else:
raise exc.ArgumentError(
"sql statement should be str or "
"SQLAlchemy data "
"selection/modification clause"
)
ret = await create_result_proxy(
self,
cursor,
self._dialect,
result_map
)
self._weak_results.add(ret)
return ret
async def _execute(self, query, *multiparams, **params):
cursor = await self._connection.cursor()
dp = _distill_params(multiparams, params)
if len(dp) > 1:
return await self._executemany(query, dp, cursor)
elif dp:
dp = dp[0]
result_map = None
if isinstance(query, str):
await cursor.execute(query, dp or None)
elif isinstance(query, ClauseElement):
if self._compiled_cache is not None:
key = query
compiled = self._compiled_cache.get(key)
if not compiled:
compiled = query.compile(dialect=self._dialect)
if dp and dp.keys() == compiled.params.keys() \
or not (dp or compiled.params):
# we only want queries with bound params in cache
self._compiled_cache[key] = compiled
else:
compiled = query.compile(dialect=self._dialect)
if not isinstance(query, DDLElement):
post_processed_params = self._base_params(
query,
dp,
compiled,
isinstance(query, UpdateBase)
)
result_map = compiled._result_columns
else:
if dp:
raise exc.ArgumentError("Don't mix sqlalchemy DDL clause "
"and execution with parameters")
post_processed_params = compiled.construct_params()
result_map = None
await cursor.execute(str(compiled), post_processed_params)
else:
raise exc.ArgumentError("sql statement should be str or "
"SQLAlchemy data "
"selection/modification clause")
ret = await create_result_proxy(
self, cursor, self._dialect, result_map
)
self._weak_results.add(ret)
return ret
async def scalar(self, query, *multiparams, **params):
"""Executes a SQL query and returns a scalar value."""
res = await self.execute(query, *multiparams, **params)
return (await res.scalar())
@property
def closed(self):
"""The readonly property that returns True if connections is closed."""
return self._connection is None or self._connection.closed
@property
def connection(self):
return self._connection
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of Transaction. This
object represents the "scope" of the transaction, which
completes when either the .rollback or .commit method is
called.
Nested calls to .begin on the same SAConnection instance will
return new Transaction objects that represent an emulated
transaction within the scope of the enclosing transaction,
that is::
trans = await conn.begin() # outermost transaction
trans2 = await conn.begin() # "nested"
await trans2.commit() # does nothing
await trans.commit() # actually commits
Calls to .commit only have an effect when invoked via the
outermost Transaction object, though the .rollback method of
any of the Transaction objects will roll back the transaction.
See also:
.begin_nested - use a SAVEPOINT
.begin_twophase - use a two phase/XA transaction
"""
coro = self._begin()
return _TransactionContextManager(coro)
async def _begin(self):
if self._transaction is None:
self._transaction = RootTransaction(self)
await self._begin_impl()
return self._transaction
else:
return Transaction(self, self._transaction)
async def _begin_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('BEGIN')
finally:
await cur.close()
async def _commit_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('COMMIT')
finally:
await cur.close()
self._transaction = None
async def _rollback_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('ROLLBACK')
finally:
await cur.close()
self._transaction = None
async def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
.commit() and .rollback(), however the outermost transaction
still controls the overall .commit() or .rollback() of the
        transaction as a whole.
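
        A minimal usage sketch (``conn`` is an open SAConnection; the table
        and values are illustrative only)::

            trans = await conn.begin_nested()    # issues a SAVEPOINT
            try:
                await conn.execute(tbl.insert(), id=1, value="v1")
            except Exception:
                await trans.rollback()           # back to the savepoint
                raise
            else:
                await trans.commit()             # releases the savepoint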
"""
if self._transaction is None:
self._transaction = RootTransaction(self)
await self._begin_impl()
else:
self._transaction = NestedTransaction(self, self._transaction)
self._transaction._savepoint = await self._savepoint_impl()
return self._transaction
async def _savepoint_impl(self, name=None):
self._savepoint_seq += 1
name = 'aiomysql_sa_savepoint_%s' % self._savepoint_seq
cur = await self._connection.cursor()
try:
await cur.execute('SAVEPOINT ' + name)
return name
finally:
await cur.close()
async def _rollback_to_savepoint_impl(self, name, parent):
cur = await self._connection.cursor()
try:
await cur.execute('ROLLBACK TO SAVEPOINT ' + name)
finally:
await cur.close()
self._transaction = parent
async def _release_savepoint_impl(self, name, parent):
cur = await self._connection.cursor()
try:
await cur.execute('RELEASE SAVEPOINT ' + name)
finally:
await cur.close()
self._transaction = parent
async def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of
TwoPhaseTransaction, which in addition to the
methods provided by Transaction, also provides a
TwoPhaseTransaction.prepare() method.
xid - the two phase transaction id. If not supplied, a
random id will be generated.
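
        A minimal usage sketch (``conn`` is an open SAConnection; the table
        and values are illustrative only)::

            trans = await conn.begin_twophase()
            await conn.execute(tbl.insert(), id=1, value="v1")
            await trans.prepare()    # phase one
            await trans.commit()     # phase two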
"""
if self._transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress.")
if xid is None:
xid = self._dialect.create_xid()
self._transaction = TwoPhaseTransaction(self, xid)
await self.execute("XA START %s", xid)
return self._transaction
async def _prepare_twophase_impl(self, xid):
await self.execute("XA END '%s'" % xid)
await self.execute("XA PREPARE '%s'" % xid)
async def recover_twophase(self):
"""Return a list of prepared twophase transaction ids."""
result = await self.execute("XA RECOVER;")
return [row[0] for row in result]
async def rollback_prepared(self, xid, *, is_prepared=True):
"""Rollback prepared twophase transaction."""
if not is_prepared:
await self.execute("XA END '%s'" % xid)
await self.execute("XA ROLLBACK '%s'" % xid)
async def commit_prepared(self, xid, *, is_prepared=True):
"""Commit prepared twophase transaction."""
if not is_prepared:
await self.execute("XA END '%s'" % xid)
await self.execute("XA COMMIT '%s'" % xid)
@property
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self._transaction is not None and self._transaction.is_active
async def close(self):
"""Close this SAConnection.
This results in a release of the underlying database
resources, that is, the underlying connection referenced
internally. The underlying connection is typically restored
back to the connection-holding Pool referenced by the Engine
that produced this SAConnection. Any transactional state
present on the underlying connection is also unconditionally
        released by calling the Transaction.rollback() method.
After .close() is called, the SAConnection is permanently in a
closed state, and will allow no further operations.
"""
if self._connection is None:
return
if self._transaction is not None:
await self._transaction.rollback()
self._transaction = None
# don't close underlying connection, it can be reused by pool
# conn.close()
self._engine.release(self)
self._connection = None
self._engine = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def _distill_params(multiparams, params):
"""Given arguments from the calling form *multiparams, **params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if not zero or hasattr(zero[0], '__iter__') and \
not hasattr(zero[0], 'strip'):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, 'keys'):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if (hasattr(multiparams[0], '__iter__') and
not hasattr(multiparams[0], 'strip')):
return multiparams
else:
return [multiparams]
|
mit
| 8,164,875,323,287,730,000
| 34.047059
| 79
| 0.567237
| false
| 4.754229
| false
| false
| false
|
sebleier/django-alpaca
|
tests/project/settings.py
|
1
|
4735
|
# Django settings for project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'project.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'vq^o%2=s(srbps_%coen23cqm3%z&$ti@4vu0red2ngkj_tl0_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'project.blog',
'south',
'alpaca',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
bsd-3-clause
| 9,094,194,376,573,882,000
| 33.311594
| 122
| 0.682999
| false
| 3.707909
| false
| false
| false
|
mekery/pdfdig
|
build/scripts-2.7/pdftotext.py
|
1
|
2534
|
#!/usr/bin/python
'''
@summary: Command line script for the Text class.
          Converts pdf to text, based on pdftotext.
@author: Micle Bu <mekery@gmail.com>
@copyright: Copyright © 2012 Micle Bu
@license: BSD New
@version: pdftotext.py 2012-03-29 Micle Bu
'''
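# Example invocation (a sketch; file names are illustrative):
#   pdftotext.py -y layout -f 1 -l 3 -o out.txt input.pdf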
import sys
import string
from pdfdig.pdftext import Text
def main(argv):
import getopt
def usage():
print ('Usage: %s [Option] File ...\n'
'Options:\n'
' -o, --output OUTFILE \n'
' Specify the output file. \n'
' -y, --layout [layout|raw] \n'
' Maintain the layout of the text. \n'
' "layout" preserve the original physical layout of the text. \n'
' "raw" keep the text in content stream order. This is the default setting. \n'
' -f, --first-page INT \n'
' First page to convert. \n'
' -l, --last-page INT \n'
' Last page to convert. \n'
' -p, --page INT \n'
' Specify a page to convert. \n'
' -h, --help \n'
' Print usage information. \n' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'o:y:f:l:p:h',
['output=','layout=','first-page=','last-page=','pageno=','help'])
except getopt.GetoptError:
return usage()
if not args: return usage()
# option
outfile = None
layout = 'raw'
first = 1
last = 100000
pageno = None
for (k, v) in opts:
if k in ('-o', '--output'): outfile = v
elif k in ('-y', '--layout'): layout = v
elif k in ('-f', '--first-page'): first = string.atoi(v)
elif k in ('-l', '--last-page'): last = string.atoi(v)
elif k in ('-p', '--pageno'): pageno = string.atoi(v)
elif k in ('-h', '--help'): return usage()
# output
if outfile:
f = file(outfile, 'w')
else:
f = sys.stdout
# pdftotext
for pdffile in args:
# pdftext
pc = Text(pdffile, layout=layout)
pages = pc.content
if pageno:
if pageno <= pc.pagecount:
f.write('{0}\n'.format(pages[pageno-1]))
else:
print "Invalide page number!"
else:
f.write('{0}\n'.format(''.join(pages[first-1:last])))
f.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
|
bsd-3-clause
| -4,973,638,659,855,998,000
| 33.256757
| 103
| 0.485793
| false
| 3.584158
| false
| false
| false
|
zozo123/buildbot
|
master/buildbot/steps/shellsequence.py
|
1
|
4370
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
from buildbot.process import buildstep
from buildbot.status import results
from twisted.internet import defer
class ShellArg(results.ResultComputingConfigMixin):
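    """A single command for ShellSequence, plus the flags controlling how its result is interpreted."""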
publicAttributes = (
results.ResultComputingConfigMixin.resultConfig +
["command", "logfile"])
def __init__(self, command=None, logfile=None, **kwargs):
name = self.__class__.__name__
if command is None:
config.error("the 'command' parameter of %s "
"must not be None" % (name,))
self.command = command
self.logfile = logfile
for k, v in kwargs.iteritems():
if k not in self.resultConfig:
config.error("the parameter '%s' is not "
"handled by ShellArg" % (k,))
setattr(self, k, v)
# we don't validate anything yet as we can have renderables.
def validateAttributes(self):
# only make the check if we have a list
if not isinstance(self.command, (str, list)):
config.error("%s is an invalid command, "
"it must be a string or a list" % (self.command,))
if isinstance(self.command, list):
if not all([isinstance(x, str) for x in self.command]):
config.error("%s must only have strings in it" % (self.command,))
runConfParams = [(p_attr, getattr(self, p_attr)) for p_attr in self.resultConfig]
not_bool = [(p_attr, p_val) for (p_attr, p_val) in runConfParams if not isinstance(p_val,
bool)]
if not_bool:
config.error("%r must be booleans" % (not_bool,))
@defer.inlineCallbacks
def getRenderingFor(self, build):
for p_attr in self.publicAttributes:
res = yield build.render(getattr(self, p_attr))
setattr(self, p_attr, res)
defer.returnValue(self)
class ShellSequence(buildstep.ShellMixin, buildstep.BuildStep):
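    """Run a sequence of shell commands, one ShellArg per command.

    A minimal configuration sketch (the make targets are illustrative only)::

        ShellSequence(commands=[
            ShellArg(command=['make', 'clean'], logfile='clean'),
            ShellArg(command=['make', 'all'], logfile='build'),
        ])
    """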
renderables = ['commands']
def __init__(self, commands=None, **kwargs):
self.commands = commands
kwargs = self.setupShellMixin(kwargs, prohibitArgs=['command'])
buildstep.BuildStep.__init__(self, **kwargs)
def shouldRunTheCommand(self, cmd):
return bool(cmd)
def getFinalState(self):
return self.describe(True)
@defer.inlineCallbacks
def runShellSequence(self, commands):
terminate = False
if commands is None:
defer.returnValue(results.EXCEPTION)
overall_result = results.SUCCESS
for arg in commands:
if not isinstance(arg, ShellArg):
defer.returnValue(results.EXCEPTION)
try:
arg.validateAttributes()
except config.ConfigErrors:
defer.returnValue(results.EXCEPTION)
# handle the command from the arg
command = arg.command
if not self.shouldRunTheCommand(command):
continue
# stick the command in self.command so that describe can use it
self.command = command
cmd = yield self.makeRemoteShellCommand(command=command,
stdioLogName=arg.logfile)
yield self.runCommand(cmd)
overall_result, terminate = results.computeResultAndTermination(
arg, cmd.results(), overall_result)
if terminate:
break
defer.returnValue(overall_result)
def run(self):
return self.runShellSequence(self.commands)
|
gpl-3.0
| -9,173,960,222,634,424,000
| 39.091743
| 97
| 0.613272
| false
| 4.441057
| true
| false
| false
|
openstack/senlin
|
senlin/objects/requests/actions.py
|
1
|
3059
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from senlin.common import consts
from senlin.objects import base
from senlin.objects import fields
@base.SenlinObjectRegistry.register
class ActionCreateRequestBody(base.SenlinObject):
fields = {
'name': fields.NameField(),
'cluster_id': fields.StringField(),
'action': fields.StringField(),
'inputs': fields.JsonField(nullable=True, default={}),
}
@base.SenlinObjectRegistry.register
class ActionCreateRequest(base.SenlinObject):
fields = {
'action': fields.ObjectField('ActionCreateRequestBody')
}
@base.SenlinObjectRegistry.register
class ActionListRequest(base.SenlinObject):
action_name_list = list(consts.CLUSTER_ACTION_NAMES)
action_name_list.extend(list(consts.NODE_ACTION_NAMES))
VERSION = '1.1'
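    # Version 1.1 of this request (mapped to API microversion 1.14 below)
    # carries the 'cluster_id' filter; obj_make_compatible() strips it when
    # serializing for older versions.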
VERSION_MAP = {
'1.14': '1.1'
}
fields = {
'name': fields.ListOfStringsField(nullable=True),
'cluster_id': fields.ListOfStringsField(nullable=True),
'action': fields.ListOfEnumField(
valid_values=action_name_list, nullable=True),
'target': fields.ListOfStringsField(nullable=True),
'status': fields.ListOfEnumField(
valid_values=list(consts.ACTION_STATUSES), nullable=True),
'limit': fields.NonNegativeIntegerField(nullable=True),
'marker': fields.UUIDField(nullable=True),
'sort': fields.SortField(
valid_keys=list(consts.ACTION_SORT_KEYS), nullable=True),
'project_safe': fields.FlexibleBooleanField(default=True)
}
def obj_make_compatible(self, primitive, target_version):
super(ActionListRequest, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 14):
if 'cluster_id' in primitive['senlin_object.data']:
del primitive['senlin_object.data']['cluster_id']
@base.SenlinObjectRegistry.register
class ActionGetRequest(base.SenlinObject):
fields = {
'identity': fields.StringField(),
}
@base.SenlinObjectRegistry.register
class ActionDeleteRequest(base.SenlinObject):
fields = {
'identity': fields.StringField()
}
@base.SenlinObjectRegistry.register
class ActionUpdateRequest(base.SenlinObject):
fields = {
'identity': fields.StringField(),
'status': fields.StringField(),
'force': fields.BooleanField(default=False)
}
|
apache-2.0
| -84,909,250,368,545,440
| 30.864583
| 78
| 0.691402
| false
| 3.967575
| false
| false
| false
|