| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
benhunter/py-stuff
|
misc/csgo-stats.py
|
1
|
12345
|
# https://old.reddit.com/r/GlobalOffensive/comments/8mjqgc/i_made_a_python_script_that_generates_stats_using/
# https://pastebin.com/LLpym05c
import datetime
import matplotlib.pyplot as plt
def min_to_sec(line): # converts minutes in string format 'XXX:XX' to seconds
seconds = 0
seconds += (int(line[-1]))
seconds += (int(line[-2])) * 10
seconds += (int(line[-4])) * 60
if line[-5].isdigit():
seconds += (int(line[-5])) * 600
if line[-6].isdigit():
seconds += (int(line[-6])) * 6000
return seconds
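# Illustrative example (hypothetical input line, not from the original script): digits
# are read from the right of the string, so min_to_sec('Match Duration: 35:27')
# returns 2127 seconds (35 * 60 + 27).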
def create_plot(entries, plottitle, xaxlabel, filelabel, res, kdinput):  # plots a histogram of entries and saves it to filelabel; res sets the K/D bin resolution
if kdinput:
plt.hist(entries, bins=(int(max(entries) * res)))
else:
plt.hist(entries, bins=range(min(entries), max(entries) + 1, 1))
plt.title(plottitle)
if kdinput:
plt.xticks(range(0, int(max(entries))))
plt.xlabel(xaxlabel)
plt.ylabel('Occurrences')
ax = plt.gca()
ax.set_axisbelow(True)
ax.grid(color='b', linestyle=':', alpha=0.3, linewidth=1)
xleft, xright = ax.get_xlim()
ybottom, ytop = ax.get_ylim()
ax.set_aspect(abs((xright - xleft) / (ybottom - ytop)) * 0.4)
plt.savefig(filelabel, dpi=300)
plt.clf()
filename = input("Input File Name (e.g. stats.txt or stats.htm): ")
steamid = input("Your Steam ID: ")
# splits file into list of individual HTML element strings
file = open(filename, encoding="utf8").read().split('<')
stats = [] # contains lists of individual games
# Format: ['MAP', [D, M, Y], Q LENGTH, GAME LENGTH, GAME SCORE,[PING, K, A, D, MVP, HSP, Score]]
current_game = [0] * 6 # temporarily holds current game data
begin = False # for parsing through beginning of document
for i, line in enumerate(file):
line = line.strip()
if 'td>\n' in line: # game info lines begin with <td>\n for some reason
if 'Competitive' in line[10:]:
begin = True # begin storing document data here
current_game[0] = line[22:]
if line[10:12] == '20':
year = line[10:14]
month = line[15:17]
day = line[18:20]
current_game[1] = list(map(int, [day, month, year]))
if 'Wait Time:' in line[10:]:
current_game[2] = min_to_sec(line)
if 'Match Duration:' in line[10:]:
current_game[3] = min_to_sec(line)
# stores personal game data as list
if begin and line[0:7] == 'a class' and steamid in line:
ping = file[i + 4][3:]
k = file[i + 6][3:]
a = file[i + 8][3:]
d = file[i + 10][3:]
# had to do this because single MVPs don't contain the number '1' by the star
mvp = -1 # if MVP entry is empty
if file[i + 12][-2] == '>':
mvp = 1
else:
for j, char in enumerate(file[i + 12]):
if char.isdigit():
mvp = file[i + 12][j:]
break
# had to do this because some HSP entries are empty
hsp = -1 # if HSP entry is empty
if file[i + 14][-2].isdigit():
hsp = file[i + 14][3:len(file[i + 14]) - 1]
score = file[i + 16][3:]
        # appends performance data (list of ints) to the current game as its sixth element (index 5)
current_game[5] = list(map(int, [ping, k, a, d, mvp, hsp, score]))
# gets the match score and sorts it in a list of 2 ints (your score first)
if 'csgo_scoreboard_score' in line:
match_score = line[45:].split(' : ')
if not isinstance(current_game[5], list):
match_score.reverse()
current_game[4] = list(map(int, match_score))
if isinstance(current_game[4], list) and isinstance(current_game[5],
list): # individual game lists contain 6 entries
stats.append(current_game)
current_game = [0] * 6 # clears list before recording next game's info
current_game[3] = 1800 # 30 minute placeholder
# declaration of stat variables
total_kills = 0
total_deaths = 0
total_assists = 0
total_MVPs = 0
total_rounds_w = 0
total_rounds_l = 0
max_match_length = 0
min_match_length = 5400
win_streak = 0
loss_streak = 0
tie_streak = 0
max_win_streak = 0
max_loss_streak = 0
max_tie_streak = 0
total_score = 0
hsp = [] # list containing all hsps
mvp = [] # list containing all mvps
map_plays = {} # dict containing maps (keys) and plays (vals)
# initializing output file
output = open('output.txt', 'w')
stats.reverse()
# looping through every 'stats' entry (game lists)
for i, stat in enumerate(stats):
# writing a list of every match to the output file
output.write('\n' + str(i) + ': ' + repr(stat))
# summing K, D, A, MVP
total_kills += stat[5][1]
total_deaths += stat[5][3]
total_assists += stat[5][2]
total_MVPs += stat[5][4]
total_rounds_w += stat[4][0]
total_rounds_l += stat[4][1]
total_score += stat[5][6]
# creating list of Headshot Percentages (-1 excluded because -1 means no entry was listed)
if stat[5][5] >= 0:
hsp.append(stat[5][5])
# creating list of MVPs (-1 excluded because -1 means no entry was listed)
if stat[5][4] >= 0:
mvp.append(stat[5][4])
# finding the longest match
if stat[3] > max_match_length:
max_match_length = stat[3]
max_match_index = i
if stat[3] < min_match_length:
min_match_length = stat[3]
min_match_index = i
# builds dictionary containing maps and number of times map has been played
if stat[0] not in map_plays:
map_plays[stat[0]] = 1
else:
map_plays[stat[0]] += 1
###########################################################################
# convoluted way of calculating win/tie/loss streaks:
if stat[4][0] > stat[4][1]:
win_streak += 1
loss_streak, tie_streak = 0, 0
elif stat[4][0] == stat[4][1]:
tie_streak += 1
win_streak, loss_streak = 0, 0
else:
loss_streak += 1
win_streak, tie_streak = 0, 0
if win_streak > max_win_streak:
max_win_streak = win_streak
max_win_index = i
if tie_streak > max_tie_streak:
max_tie_streak = tie_streak
max_tie_index = i
if loss_streak > max_loss_streak:
max_loss_streak = loss_streak
max_loss_index = i
################################################################################
# writing output to output.txt file
output.write('\nFormat: [\'MAP\', [D, M, Y], QUEUE LENGTH, GAME LENGTH, GAME SCORE, [PING, K, A, D, MVP, HSP, Score]]')
output.write('\n\nSTATS----------------------------------------------------------------\n')
output.write('{:<20} {:>7}'.format('\nTotal Kills:', total_kills))
output.write('{:<20} {:>7}'.format('\nTotal Deaths:', total_deaths))
output.write('{:<20} {:>7}'.format('\nTotal Assists:', total_assists))
output.write('{:<20} {:>7}'.format('\nTotal MVPs:', total_MVPs))
kdr = round(total_kills / total_deaths, 3)
output.write('{:<20} {:>7}'.format('\nK/D:', kdr))
output.write('\n')
output.write('{:<20} {:>7}'.format('\nTotal Rounds Won:', total_rounds_w))
output.write('{:<20} {:>7}'.format('\nTotal Rounds Lost:', total_rounds_l))
output.write('\n\nAverages (per game):')
output.write('\n\t{:<15} {:>8}'.format('K:', round(total_kills / len(stats), 2)))
output.write('\n\t{:<15} {:>8}'.format('D:', round(total_deaths / len(stats), 2)))
output.write('\n\t{:<15} {:>8}'.format('A:', round(total_assists / len(stats), 2)))
output.write('\n\t{:<15} {:>8}'.format('MVP:', round(total_MVPs / len(stats), 2)))
output.write('\n\t{:<15} {:>8}'.format('Score:', round(total_score / len(stats), 2)))
avg_rounds_won = round(total_rounds_w / len(stats), 1)
avg_rounds_lost = round(total_rounds_l / len(stats), 1)
output.write('\n\t{:<10} {} : {}'.format('Match (W:L):', avg_rounds_won, avg_rounds_lost))
total_rounds = total_rounds_l + total_rounds_w
output.write('\n\nAverages (per round):')
output.write('\n\t{:<15} {:>8}'.format('K:', round(total_kills / total_rounds, 2)))
output.write('\n\t{:<15} {:>8}'.format('D:', round(total_deaths / total_rounds, 2)))
output.write('\n\t{:<15} {:>8}'.format('A:', round(total_assists / total_rounds, 2)))
output.write('\n\t{:<15} {:>8}'.format('MVP:', round(total_MVPs / total_rounds, 2)))
output.write('\n\nHSP:')
output.write('\n\t{:<10} {:>8}%'.format('Max:', round(max(hsp), 2)))
output.write('\n\t{:<10} {:>8}%'.format('Min:', round(min(hsp), 2)))
output.write('\n\t{:<10} {:>8}%'.format('Avg:', round(sum(hsp) / len(hsp), 1)))
output.write(
'\n\nLongest Match:\t\t{}\t\t(game #{})'.format(datetime.timedelta(seconds=max_match_length), max_match_index))
output.write(
'\nShortest Match:\t\t{}\t\t(game #{})'.format(datetime.timedelta(seconds=min_match_length), min_match_index))
output.write(
'\nMax Win Streak: \t{}\t\t(from game #{} to #{})'.format(max_win_streak, max_win_index - max_win_streak + 1,
max_win_index))
output.write(
'\nMax Tie Streak: \t{}\t\t(from game #{} to #{})'.format(max_tie_streak, max_tie_index - max_tie_streak + 1,
max_tie_index))
output.write(
'\nMax Loss Streak: \t{}\t\t(from game #{} to #{})'.format(max_loss_streak, max_loss_index - max_loss_streak + 1,
max_loss_index))
output.write('\n\nMap Plays:')
for entry in sorted(map_plays, key=map_plays.get, reverse=True):
output.write('\n\t{:<12} {:>12}'.format(entry, map_plays[entry]))
print('\'output.txt\' can be found in the same directory as this script')
output.close()
#####################################################################
# graphing and graphing calculations done below
# lists containing raw vals for each stat
kd = []
kills = []
deaths = []
assists = []
mvps = []
hsps = []
rw = [] # rounds won
rl = []
games_played = {}
for stat in stats:
# collects vals from each game
kills.append(stat[5][1])
deaths.append(stat[5][3])
assists.append(stat[5][2])
if stat[5][4] == -1:
mvps.append(0)
else:
mvps.append(stat[5][4])
if stat[5][5] == -1:
hsps.append(0)
else:
hsps.append(stat[5][5])
if stat[5][3] > 0:
kd.append(stat[5][1] / stat[5][3])
else:
kd.append(1)
if stat[4][0] < 15:
rw.append(stat[4][0])
if stat[4][1] < 15:
rl.append(stat[4][1])
if stat[1][2] * 12 + stat[1][1] not in games_played:
games_played[stat[1][2] * 12 + stat[1][1]] = 1
else:
games_played[stat[1][2] * 12 + stat[1][1]] += 1
plt.rc('font', size=8)
create_plot(kd, 'K/D Distribution', 'K/D (resolution: 0.05)', 'KD_Distribution.png', 20, True)
kd_trimmed = [x for x in kd if x <= 3]
create_plot(kd_trimmed, 'K/D Distribution (truncated at x = 3)', 'K/D (resolution: 0.01)',
'KD_Distribution (TRIMMED).png', 100, True)
create_plot(kills, 'Kill Distribution', 'Kills', 'Kill_Distribution.png', 0, False)
create_plot(deaths, 'Death Distribution', 'Deaths', 'Death_Distribution.png', 0, False)
create_plot(assists, 'Assist Distribution', 'Assists', 'Assist_Distribution.png', 0, False)
create_plot(mvps, 'MVP Distribution', 'MVPs', 'MVP_Distribution.png', 0, False)
create_plot(hsps, 'HSP Distribution', 'HSP', 'HSP_Distribution.png', 0, False)
create_plot(rw, 'Rounds Won Distribution (exc. 15, 16)', 'Rounds', 'RW_Distribution.png', 0, False)
create_plot(rl, 'Rounds Lost Distribution (exc. 15, 16)', 'Rounds', 'RL_Distribution.png', 0, False)
# graphing games played
games_played_x = []
games_played_y = []
for entry in sorted(games_played):
games_played_x.append(entry - 1)
games_played_y.append(games_played[entry])
games_played_x_string = []
for entry in games_played_x:
year = int(entry / 12)
month = (entry % 12) + 1
monthyear = str(month) + '-' + str(year)
games_played_x_string.append(monthyear)
plt.bar(games_played_x, games_played_y)
plt.title('Games Played Per Month')
plt.xlabel('Month')
plt.ylabel('Occurrences')
plt.xticks(games_played_x[::4], games_played_x_string[::4], rotation='45')
plt.savefig('Games_Played.png', dpi=300)
plt.clf()
print('output images can be found in the same directory as this script')
|
mit
| 500,048,332,103,682,240
| 35.202346
| 119
| 0.580154
| false
| 2.992727
| false
| false
| false
|
fusionbox/mezzanine
|
mezzanine/core/templatetags/mezzanine_tags.py
|
1
|
23919
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model, get_model
from django.template import (Context, Node, TextNode, Template,
TemplateSyntaxError, TOKEN_TEXT, TOKEN_VAR, TOKEN_COMMENT, TOKEN_BLOCK)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token.token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
@register.inclusion_tag("includes/form_fields.html", takes_context=True)
def fields_for(context, form):
"""
Renders fields for a form.
"""
context["form_for_fields"] = form
return context
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
"""
Renders an alert if the form has any errors.
"""
context["form"] = form
return context
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
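# Template usage sketch (assumed, not shown in this module): ``sort_by`` works on
# either object attributes or dict keys, e.g.
#   {% load mezzanine_tags %}
#   {% for item in object_list|sort_by:"title" %}{{ item }}{% endfor %}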
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
    so we need to manually pull out all tokens if the app isn't
    installed, since if we used a normal ``if`` tag with a False arg,
    the include tag would still try to find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
if app.strip("\"'") not in settings.INSTALLED_APPS:
while True:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
parser.tokens.insert(0, token)
break
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
    Return the full URL for a Gravatar given an email address.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
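# Usage sketch (assumed): called in a template with the paginated page object, e.g.
#   {% pagination_for objects %}
# which renders ``includes/pagination.html`` with the preserved querystring.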
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
context["search_model_choices"] = sorted(search_model_choices)
return context
@register.simple_tag
def thumbnail(image_url, width, height, quality=95, left=.5, top=.5,
padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width and
height on the first time it is requested, and returns the URL to the new
    resized image. If width or height is zero, the original aspect ratio is
    maintained.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but the
        # locale has since changed and the filesystem no longer supports utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(thumb_url, File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
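# Usage sketch (assumed): the tag returns a path relative to MEDIA_URL, e.g.
#   <img src="{{ MEDIA_URL }}{% thumbnail page.image 200 0 %}">
# generates a 200px-wide thumbnail once (height keeps the original ratio) and
# returns the cached file's URL on subsequent requests.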
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
filter_names = settings.RICHTEXT_FILTERS
if not filter_names:
try:
filter_names = [settings.RICHTEXT_FILTER]
except AttributeError:
pass
else:
from warnings import warn
warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
"the new plural setting `RICHTEXT_FILTERS`.")
for filter_name in filter_names:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content
@register.filter
def richtext_filter(content):
"""
Deprecated version of richtext_filters above.
"""
from warnings import warn
warn("The `richtext_filter` template tag is deprecated in favor of "
"the new plural tag `richtext_filters`.")
return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(Context(context))
return parsed
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for
url names in admin templates as these won't resolve when admin
tests are running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
    Adapted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
group_title, items = group
group_title = group_title.title()
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
item_title, item = item
else:
item_title = None
menu_order[item] = (group_index, group_title,
item_index, item_title)
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if in_menu and request.user.has_module_perms(opts.app_label):
perms = model_admin.get_model_perms(request)
admin_url_name = ""
if perms["change"]:
admin_url_name = "changelist"
change_url = admin_url(model, admin_url_name)
else:
change_url = None
if perms["add"]:
admin_url_name = "add"
add_url = admin_url(model, admin_url_name)
else:
add_url = None
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
try:
app_index, app_title, model_index, model_title = \
menu_order[model_label]
except KeyError:
app_index = None
app_title = opts.app_label.title()
model_index = None
model_title = None
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural)
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": model_index,
"perms": model_admin.get_model_perms(request),
"name": model_title,
"admin_url": change_url,
"add_url": add_url
})
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
app_index, app_title, item_index, item_title = item
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": item_index,
"perms": {"custom": True},
"name": item_title,
"admin_url": item_url,
})
app_list = list(app_dict.values())
sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
sites = user.sitepermissions.get().sites.all()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(Context(context)))
return "".join(output)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url
|
bsd-2-clause
| 290,233,697,215,057,200
| 34.435556
| 79
| 0.609641
| false
| 3.937932
| false
| false
| false
|
ActiveState/code
|
recipes/Python/511508_Binomial_Queues/recipe-511508.py
|
1
|
4907
|
"""
BinomialQueue.py
Meldable priority queues
Written by Gregoire Dooms and Irit Katriel
"""
class LinkError(Exception): pass
class EmptyBinomialQueueError(Exception): pass
class BinomialTree:
"A single Binomial Tree"
def __init__(self, value):
"Create a one-node tree. value is the priority of this node"
self.value = value
self.rank = 0
self.children = []
def link(self, other_tree):
"""Make other_tree the son of self. Both trees must have the
same rank, and other_tree must have a larger minimum priority
"""
if self.rank != other_tree.rank:
raise LinkError()
if self.value > other_tree.value:
raise LinkError()
self.children.append(other_tree)
self.rank += 1
return True
def str(self, indent = 0):
return (" "*indent +
"rank: %d value: %d"%(self.rank, self.value)+
"\n"+"".join(child.str(indent+2)
for child in self.children)
)
def __str__(self):
return self.str()
class BinomialQueue:
""" A Meldable priority Queue """
def __init__(self,infinity=1e300):
"""
Create an empty Binomial Queue.
Since a queue can hold any comparable data type, we need to know
at initialization time what an "infinity" element looks like.
"""
self.infinity = infinity
self.parent = self
self.trees = []
self.elements = 0
self.min = self.infinity
self.min_tree_rank = -1
def __capacity(self):
return 2**len(self.trees) - 1
def __resize(self):
while self.__capacity() < self.elements:
self.trees.append(None)
def __add_tree(self,new_tree):
" Insert new_tree into self"
self.elements = self.elements + 2**new_tree.rank
self.__resize()
while self.trees[new_tree.rank] is not None:
if self.trees[new_tree.rank].value < new_tree.value:
new_tree, self.trees[new_tree.rank] = \
self.trees[new_tree.rank], new_tree # swap
r = new_tree.rank
new_tree.link(self.trees[r])
self.trees[r] = None
self.trees[new_tree.rank] = new_tree
if new_tree.value <= self.min:
self.min = new_tree.value
self.min_tree_rank = new_tree.rank
def meld(self, other_queue):
"Insert all elements of other_queue into self "
for tree in other_queue.trees:
if tree is not None:
self.__add_tree(tree)
def insert(self, value):
"Insert value into self "
tree = BinomialTree(value)
self.__add_tree(tree)
def get_min(self):
"Return the minimum element in self"
return self.min
def delete_min(self):
"Delete the minumum element from self "
if not self:
raise EmptyBinomialQueueError()
to_remove = self.trees[self.min_tree_rank]
self.trees[to_remove.rank] = None
self.elements = self.elements - 2**to_remove.rank
for child in to_remove.children:
self.__add_tree(child)
self.min = self.infinity
for tree in self.trees:
if tree is not None:
if tree.value <= self.min:
self.min = tree.value
self.min_tree_rank = tree.rank
def __nonzero__(self):
return self.elements
def __str__(self):
s = """elements: %d min: %s
min_tree_rank: %d
tree vector: """ % (self.elements, str(self.min), self.min_tree_rank)
s += " ".join("10"[tree is None] for tree in self.trees)
s += "\n"
s += "".join(str(tree) for tree in self.trees if tree is not None)
return s
def __len__(self):
return self.elements
def __iadd__(self,other):
if type(other) == type(self):
self.meld(other)
else:
self.insert(other)
return self
def run_test():
inf = 2e300
N = 10
Q1 = BinomialQueue(inf)
Q2 = BinomialQueue(inf)
print Q1
print "-------------------------------------------"
Q1 += 20 # Same as Q1.insert(20)
Q1.insert(1)
Q1.insert(5)
Q1.insert(10)
print Q1
print "-------------------------------------------"
Q2.insert(2)
Q2.insert(22)
Q2.insert(12)
print Q2
print "-------------------------------------------"
Q1 += Q2 # Same as Q1.meld(Q2)
print Q1
print "-------------------------------------------"
while Q1:
print "Q1.min = ", Q1.min
Q1.delete_min()
if __name__ == "__main__":
run_test()
|
mit
| -1,932,582,368,276,943,000
| 27.864706
| 77
| 0.509069
| false
| 3.857704
| false
| false
| false
|
1000ideas/sublime_redmine
|
Redmine.py
|
1
|
6831
|
import re
import json
import functools
import urllib, urllib2
import sublime, sublime_plugin, threading
import webbrowser
class RedmineError(Exception):
pass
def main_thread(callback, *args, **kwargs):
# sublime.set_timeout gets used to send things onto the main thread
# most sublime.[something] calls need to be on the main thread
sublime.set_timeout(functools.partial(callback, *args, **kwargs), 0)
def open_in_browser(url, browser = None):
if not re.search("^https?://", url):
url = "http://" + url
try:
print browser
webbrowser.get(browser).open_new_tab(url)
except webbrowser.Error:
sublime.error_message("[Redmine] Invalid browser command")
class RedmineAPIThread(threading.Thread):
def __init__(self, method, path, callback = None, data={}, host = '', apikey = ''):
if re.search("^https?://", host):
self.host = host
else:
self.host = "http://" + host
self.key = apikey
self.method = method
self.path = path
self.data = data
self.callback = callback
threading.Thread.__init__(self)
def run(self):
h = {
"X-Redmine-API-Key": self.key,
"Content-Type": 'application/json'
}
try:
opener = urllib2.build_opener(urllib2.HTTPHandler)
if self.method == "GET":
url = "%s/%s.json?%s" % (self.host, self.path, urllib.urlencode(self.data))
_data = None
else:
url = "%s/%s.json" % (self.host, self.path)
_data = json.dumps(self.data) #if self.data != None else None
print "[%s] %s" %(self.method, url)
req = urllib2.Request(url, _data, headers= h)
req.get_method = lambda: self.method
http_file = urllib2.urlopen(req)
main_thread(self.callback, http_file.read().decode('utf-8'))
except urllib2.HTTPError as e:
main_thread(sublime.error_message, "[Redmine] %s (%s)" % (e, url))
except urllib2.URLError as e:
main_thread(sublime.error_message, "[Redmine] URLError: %s" % (e))
class RedmineCommand(sublime_plugin.WindowCommand):
def api_call(self, path, data={}, method="GET", callback=None):
try:
s = sublime.load_settings("Redmine.sublime-settings")
host = s.get('host')
if len(host) == 0: raise RedmineError("Invalid host name")
apikey = s.get('apikey')
            if len(apikey) == 0: raise RedmineError("Invalid API key")
thread = RedmineAPIThread(method, path, callback or self.generic_callback, data, host, apikey)
thread.start()
except RedmineError as ex:
sublime.error_message("[Redmine] %s" % ex)
def generic_callback(self, output):
pass
def quick_panel(self, *args, **kwargs):
self.window.show_quick_panel(*args, **kwargs)
class ListRedmineStatusesCommand(RedmineCommand):
def __init__(self, window):
self.statuses = []
RedmineCommand.__init__(self, window)
def run(self):
if len(self.statuses) == 0:
self.api_call('issue_statuses')
else:
self.select_status()
def generic_callback(self, output):
jout = json.loads(output)
self.statuses = jout['issue_statuses']
self.select_status()
def select_status(self):
self.quick_panel([s['name'] for s in self.statuses], self.status_selected)
def status_selected(self, idx):
if idx >= 0:
sublime.status_message("Selected status: %s" % (self.statuses[idx]['name']))
class ListRedmineIssuesCommand(RedmineCommand):
def run(self, issue_filter = {}):
issue_filter.update({'sort': 'id:desc'})
self.api_call('issues', issue_filter)
def generic_callback(self, output):
jout = json.loads(output)
self.issues = jout['issues']
self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue)
def select_issue(self, idx):
if idx >= 0:
issue_id = self.issues[idx]['id']
s = sublime.load_settings("Redmine.sublime-settings")
host = s.get('host')
browser = s.get('browser')
if not isinstance(browser, basestring):
browser = None
else:
browser = str(browser)
open_in_browser( "%s/issues/%s" % (host, issue_id), browser )
class UpdateRedmineStatusCommand(ListRedmineStatusesCommand):
def run(self, issue_id):
if issue_id == None: return
self.issue_id = issue_id
ListRedmineStatusesCommand.run(self)
def status_selected(self, idx):
if idx >= 0 and self.issue_id != None:
self.status = self.statuses[idx]
self.api_call(
'issues/%s' % self.issue_id,
{'issue' : {'status_id': self.status['id']}},
'PUT',
self.update_response
)
def update_response(self, output):
sublime.status_message("Status of #%s changed to %s" %(self.issue_id, self.status['name']))
class UpdateRedmineIssuesCommand(ListRedmineIssuesCommand):
def select_issue(self, idx):
if idx >= 0:
issue_id = self.issues[idx]['id']
self.window.run_command('update_redmine_status', {'issue_id': issue_id})
class StartRedmineIssuesCommand(ListRedmineIssuesCommand):
def generic_callback(self, output):
jout = json.loads(output)
self.issues = filter(lambda i: not i.get('play', False), jout['issues'])
if len(self.issues) > 0:
self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue)
else:
sublime.status_message("No issues to start!")
def select_issue(self, idx):
if idx >= 0:
self.issue_id = self.issues[idx]['id']
self.api_call(
'issues/%s/start_time' % self.issue_id,
None,
'POST',
self.started_response
)
def started_response(self, output):
jout = json.loads(output)
if jout.get('success', False):
sublime.status_message("Time tracking for #%s started." % (self.issue_id) )
else:
            sublime.error_message("[Redmine] Error occurred!")
class StopRedmineIssuesCommand(ListRedmineIssuesCommand):
def generic_callback(self, output):
jout = json.loads(output)
self.issues = filter(lambda i: i.get('play', False), jout['issues'])
if len(self.issues) > 0:
self.quick_panel(["#%d: [%s] %s" % (i["id"], i["project"]["name"], i["subject"]) for i in self.issues], self.select_issue)
else:
sublime.status_message("No started issues!")
def select_issue(self, idx):
if idx >= 0:
self.issue_id = self.issues[idx]['id']
self.api_call(
'issues/%s/stop_time' % self.issue_id,
None,
'POST',
self.stoped_response
)
def stoped_response(self, output):
jout = json.loads(output)
if jout.get('success', False):
            sublime.status_message("Time tracking for #%s stopped. Time spent: %.2fh." % (self.issue_id, jout.get('time', 0.0)))
else:
            sublime.error_message("[Redmine] Error occurred!")
|
mit
| -6,297,098,791,352,698,000
| 31.528571
| 128
| 0.631386
| false
| 3.333821
| false
| false
| false
|
ekansa/open-context-py
|
opencontext_py/apps/about/views.py
|
1
|
20142
|
import json
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.template import RequestContext, loader
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.libs.rootpath import RootPath
from opencontext_py.libs.requestnegotiation import RequestNegotiation
from opencontext_py.apps.about.estimator import CostEstimator
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.views.decorators.cache import never_cache
# @cache_control(no_cache=True)
# @never_cache
def index_view(request):
""" Get the search context JSON-LD """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Video and introduction to Open Context, an open-access '
'data publication service for archaeology ',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': 'https://opencontext.wistia.com/medias/s0g0fsyqkz'
}
template = loader.get_template('about/index.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About',
'act_nav': 'about',
'og': open_graph,
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def uses_view(request):
""" Get uses page """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
template = loader.get_template('about/uses.html')
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/uses',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Summary of how to use Open Context for sharing, '\
'preserving, exploring and analyzing archaeological '\
'research data',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Uses',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def pub_view(request):
""" Get publishing overview page """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/publishing',
'site_name': settings.CANONICAL_SITENAME,
'description': 'How to publish archaeological research data '\
'with Open Context',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/publishing.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Publishing',
'act_nav': 'about',
'og': open_graph,
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
# @cache_control(no_cache=True)
# @never_cache
def people_view(request):
""" Get people page """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/people',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Data editors, software developers, designers '\
'and alumni with Open Context research data '\
'publishing services',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/people.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - People',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@ensure_csrf_cookie
# @cache_control(no_cache=True)
# @never_cache
def estimate_view(request):
""" Get page with publication project cost estimation """
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/estimate',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Estimate data publication and archiving '\
'costs with Open Context to help budget for '\
'grant data management plans',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/estimate.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Cost Estimate',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def process_estimate(request):
""" process an estimate """
if request.method == 'POST':
cost = CostEstimator()
output = cost.process_estimate(request.POST)
json_output = json.dumps(output,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
elif request.method == 'GET':
cost = CostEstimator()
output = cost.process_estimate(request.GET)
json_output = json.dumps(output,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
        return HttpResponseForbidden()
def concepts_view(request):
""" Get concepts overview """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
template = loader.get_template('about/temp.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Concepts',
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def tech_view(request):
""" Show technology page """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/technology',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Overview of the open-source software technologies '\
'created and used by Open Context to publish '\
'archaeological data on the Web',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/technology.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Technology',
'act_nav': 'about',
'og': open_graph,
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
def services_view(request):
""" Get page documenting the API """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
            'url': base_url + '/about/services',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Overview of the APIs (machine-readable data) '\
'offered by Open Context to promote '\
'interoperability and new uses of data',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/services.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Web Services and APIs',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
def recipes_view(request):
""" Get page about recipes using the API """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/recipes',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Specific guidance on the use of Open Context APIs '\
'(machine-readable data) to meet certain data '\
'management needs',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/recipes.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - API Cookbook',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def bibliography_view(request):
""" Get page about bibliography / publications """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/bibliography',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Publications related to Open Context and its '\
'contributions to research data management, '\
'archaeological ethics, scholarly communications, and '\
'professional practice',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/bibliography.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Bibliography',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def ip_view(request):
""" Get page about IP policies """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/intellectual-property',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Intellectual property policies for Open Context and '\
'ethical guidance for contributors and users of '\
'archaeological research data',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/intellectual-property.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Intellectual Property',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
@cache_control(no_cache=True)
@never_cache
def sponsors_view(request):
""" Get the page about sponsors """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/sponsors',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Sources of financial support for '\
'Open Context and collaborative institutions providing '\
'complementary services',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/sponsors.html')
context = {
'base_url': base_url,
            'page_title': 'Open Context: About - Sponsors',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
# @cache_control(no_cache=True)
# @never_cache
def terms_view(request):
""" Get the page about Terms """
request = RequestNegotiation().anonymize_request(request)
rp = RootPath()
base_url = rp.get_baseurl()
req_neg = RequestNegotiation('text/html')
if 'HTTP_ACCEPT' in request.META:
req_neg.check_request_support(request.META['HTTP_ACCEPT'])
if req_neg.supported:
# requester wanted a mimetype we DO support
open_graph = {
'twitter_site': settings.TWITTER_SITE,
'type': 'website',
'url': base_url + '/about/terms',
'site_name': settings.CANONICAL_SITENAME,
'description': 'Terms and Conditions of Use, and '\
'Privacy Policies for Open Context',
'image': base_url + '/static/oc/images/index/oc-blue-square-logo.png',
'video': False
}
template = loader.get_template('about/terms.html')
context = {
'base_url': base_url,
'page_title': 'Open Context: About - Terms of Use and Privacy Policies',
'og': open_graph,
'act_nav': 'about',
'nav_items': settings.NAV_ITEMS
}
return HttpResponse(template.render(context, request))
else:
# client wanted a mimetype we don't support
return HttpResponse(req_neg.error_message,
status=415)
|
gpl-3.0
| 8,155,570,767,275,034,000
| 38.887129
| 84
| 0.578642
| false
| 4.006763
| false
| false
| false
|
polyaxon/polyaxon
|
core/polyaxon/utils/string_utils.py
|
1
|
2468
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import unicodedata
from decimal import Decimal
from typing import Callable
def strip_spaces(value, sep=None, join=True):
"""Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
value = value.strip()
value = [v.strip() for v in value.split(sep)]
join_sep = sep or " "
return join_sep.join(value) if join else value
def is_protected_type(obj):
"""
A check for preserving a type as-is when passed to force_text(strings_only=True).
"""
return isinstance(
obj,
(
type(None),
int,
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
),
)
def force_bytes(value, encoding="utf-8", strings_only=False, errors="strict"):
"""
Resolve any value to bytes.
If `strings_only` is True, skip protected objects.
"""
# Handle the common case first for performance reasons.
if isinstance(value, bytes):
if encoding == "utf-8":
return value
return value.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(value):
return value
if isinstance(value, memoryview):
return bytes(value)
return value.encode(encoding, errors)
def slugify(value: str, mark_safe: Callable = None) -> str:
"""
Convert spaces/dots to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Also strip leading and trailing whitespace.
"""
value = str(value)
value = (
unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii")
)
value = re.sub(r"[^\w\.\s-]", "", value).strip()
value = re.sub(r"[-\.\s]+", "-", value)
return mark_safe(value) if mark_safe else value
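# Minimal usage sketch (added for illustration; not part of the original
# module). The expected values are assumptions derived from the functions
# defined above.
if __name__ == "__main__":
    assert strip_spaces("  a   b  ") == "a b"
    assert force_bytes("héllo") == "héllo".encode("utf-8")
    assert slugify("My File.v2 name") == "My-File-v2-name"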
|
apache-2.0
| 7,351,634,692,586,380,000
| 29.469136
| 97
| 0.651945
| false
| 3.961477
| false
| false
| false
|
ruddra/django-oscar
|
oscar/apps/basket/abstract_models.py
|
1
|
28617
|
from decimal import Decimal
import zlib
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager
from oscar.apps.offer import results
from oscar.core.compat import AUTH_USER_MODEL
from oscar.templatetags.currency_filters import currency
class AbstractBasket(models.Model):
"""
Basket object
"""
# Baskets can be anonymously owned - hence this field is nullable. When a
# anon user signs in, their two baskets are merged.
owner = models.ForeignKey(
AUTH_USER_MODEL, related_name='baskets', null=True,
verbose_name=_("Owner"))
# Basket statuses
# - Frozen is for when a basket is in the process of being submitted
# and we need to prevent any changes to it.
OPEN, MERGED, SAVED, FROZEN, SUBMITTED = (
"Open", "Merged", "Saved", "Frozen", "Submitted")
STATUS_CHOICES = (
(OPEN, _("Open - currently active")),
(MERGED, _("Merged - superceded by another basket")),
(SAVED, _("Saved - for items to be purchased later")),
(FROZEN, _("Frozen - the basket cannot be modified")),
(SUBMITTED, _("Submitted - has been ordered at the checkout")),
)
status = models.CharField(
_("Status"), max_length=128, default=OPEN, choices=STATUS_CHOICES)
# A basket can have many vouchers attached to it. However, it is common
# for sites to only allow one voucher per basket - this will need to be
# enforced in the project's codebase.
vouchers = models.ManyToManyField(
'voucher.Voucher', null=True, verbose_name=_("Vouchers"), blank=True)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_merged = models.DateTimeField(_("Date merged"), null=True, blank=True)
date_submitted = models.DateTimeField(_("Date submitted"), null=True,
blank=True)
# Only if a basket is in one of these statuses can it be edited
editable_statuses = (OPEN, SAVED)
class Meta:
abstract = True
verbose_name = _('Basket')
verbose_name_plural = _('Baskets')
objects = models.Manager()
open = OpenBasketManager()
saved = SavedBasketManager()
def __init__(self, *args, **kwargs):
super(AbstractBasket, self).__init__(*args, **kwargs)
# We keep a cached copy of the basket lines as we refer to them often
# within the same request cycle. Also, applying offers will append
# discount data to the basket lines which isn't persisted to the DB and
# so we want to avoid reloading them as this would drop the discount
# information.
self._lines = None
self.offer_applications = results.OfferApplications()
def __unicode__(self):
return _(
u"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)") % {
'status': self.status,
'owner': self.owner,
'num_lines': self.num_lines}
# ========
# Strategy
# ========
@property
def has_strategy(self):
return hasattr(self, '_strategy')
def _get_strategy(self):
if not self.has_strategy:
raise RuntimeError(
"No strategy class has been assigned to this basket. "
"This is normally assigned to the incoming request in "
"oscar.apps.basket.middleware.BasketMiddleware. "
"Since it is missing, you must be doing something different. "
"Ensure that a strategy instance is assigned to the basket!"
)
return self._strategy
def _set_strategy(self, strategy):
self._strategy = strategy
strategy = property(_get_strategy, _set_strategy)
def all_lines(self):
"""
Return a cached set of basket lines.
This is important for offers as they alter the line models and you
don't want to reload them from the DB as that information would be
lost.
"""
if self.id is None:
return self.lines.none()
if self._lines is None:
self._lines = self.lines.select_related(
'product', 'product__stockrecord'
).all().prefetch_related('attributes', 'product__images')
# Assign strategy to each line so it can use it to determine
# prices. This is only needed for Django 1.4.5, where accessing
# self.basket from within the line will create a new basket
# instance (with no strategy assigned). In later version, the
# original basket instance is cached and keeps its strategy
# property.
for line in self._lines:
line.strategy = self.strategy
return self._lines
def is_quantity_allowed(self, qty):
"""
Test whether the passed quantity of items can be added to the basket
"""
# We enforce a max threshold to prevent a DoS attack via the offers
# system.
basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
if basket_threshold:
total_basket_quantity = self.num_items
max_allowed = basket_threshold - total_basket_quantity
if qty > max_allowed:
return False, _(
"Due to technical limitations we are not able "
"to ship more than %(threshold)d items in one order.") % {
'threshold': basket_threshold,
}
return True, None
# ============
# Manipulation
# ============
def flush(self):
"""
Remove all lines from basket.
"""
if self.status == self.FROZEN:
raise PermissionDenied("A frozen basket cannot be flushed")
self.lines.all().delete()
self._lines = None
def add_product(self, product, quantity=1, options=None):
"""
Add a product to the basket
'stock_info' is the price and availability data returned from
a partner strategy class.
The 'options' list should contain dicts with keys 'option' and 'value'
which link the relevant product.Option model and string value
respectively.
"""
if options is None:
options = []
if not self.id:
self.save()
# Ensure that all lines are the same currency
price_currency = self.currency
stock_info = self.strategy.fetch(product)
if price_currency and stock_info.price.currency != price_currency:
raise ValueError((
"Basket lines must all have the same currency. Proposed "
"line has currency %s, while basket has currency %s") % (
stock_info.price.currency, price_currency))
if stock_info.stockrecord is None:
raise ValueError((
"Basket lines must all have stock records. Strategy hasn't "
"found any stock record for product %s") % product)
# Line reference is used to distinguish between variations of the same
# product (eg T-shirts with different personalisations)
line_ref = self._create_line_reference(
product, stock_info.stockrecord, options)
# Determine price to store (if one exists). It is only stored for
# audit and sometimes caching.
defaults = {
'quantity': quantity,
'price_excl_tax': stock_info.price.excl_tax,
'price_currency': stock_info.price.currency,
}
if stock_info.price.is_tax_known:
defaults['price_incl_tax'] = stock_info.price.incl_tax
line, created = self.lines.get_or_create(
line_reference=line_ref,
product=product,
stockrecord=stock_info.stockrecord,
defaults=defaults)
if created:
for option_dict in options:
line.attributes.create(option=option_dict['option'],
value=option_dict['value'])
else:
line.quantity += quantity
line.save()
self.reset_offer_applications()
add_product.alters_data = True
add = add_product
def applied_offers(self):
"""
Return a dict of offers successfully applied to the basket.
This is used to compare offers before and after a basket change to see
if there is a difference.
"""
return self.offer_applications.offers
def reset_offer_applications(self):
"""
Remove any discounts so they get recalculated
"""
self.offer_applications = results.OfferApplications()
self._lines = None
def merge_line(self, line, add_quantities=True):
"""
For transferring a line from another basket to this one.
This is used with the "Saved" basket functionality.
"""
try:
existing_line = self.lines.get(line_reference=line.line_reference)
except ObjectDoesNotExist:
# Line does not already exist - reassign its basket
line.basket = self
line.save()
else:
# Line already exists - assume the max quantity is correct and
# delete the old
if add_quantities:
existing_line.quantity += line.quantity
else:
existing_line.quantity = max(existing_line.quantity,
line.quantity)
existing_line.save()
line.delete()
finally:
self._lines = None
merge_line.alters_data = True
def merge(self, basket, add_quantities=True):
"""
Merges another basket with this one.
:basket: The basket to merge into this one.
:add_quantities: Whether to add line quantities when they are merged.
"""
# Use basket.lines.all instead of all_lines as this function is called
# before a strategy has been assigned.
for line_to_merge in basket.lines.all():
self.merge_line(line_to_merge, add_quantities)
basket.status = self.MERGED
basket.date_merged = now()
basket._lines = None
basket.save()
merge.alters_data = True
def freeze(self):
"""
Freezes the basket so it cannot be modified.
"""
self.status = self.FROZEN
self.save()
freeze.alters_data = True
def thaw(self):
"""
Unfreezes a basket so it can be modified again
"""
self.status = self.OPEN
self.save()
thaw.alters_data = True
def submit(self):
"""
Mark this basket as submitted
"""
self.status = self.SUBMITTED
self.date_submitted = now()
self.save()
submit.alters_data = True
# Kept for backwards compatibility
set_as_submitted = submit
def is_shipping_required(self):
"""
Test whether the basket contains physical products that require
shipping.
"""
for line in self.all_lines():
if line.product.is_shipping_required:
return True
return False
# =======
# Helpers
# =======
def _create_line_reference(self, product, stockrecord, options):
"""
Returns a reference string for a line based on the item
and its options.
"""
base = '%s_%s' % (product.id, stockrecord.id)
if not options:
return base
return "%s_%s" % (base, zlib.crc32(str(options)))
def _get_total(self, property):
"""
For executing a named method on each line of the basket
and returning the total.
"""
total = Decimal('0.00')
for line in self.all_lines():
try:
total += getattr(line, property)
except ObjectDoesNotExist:
# Handle situation where the product may have been deleted
pass
return total
# ==========
# Properties
# ==========
@property
def is_empty(self):
"""
Test if this basket is empty
"""
return self.id is None or self.num_lines == 0
@property
def is_tax_known(self):
"""
Test if tax values are known for this basket
"""
return all([line.is_tax_known for line in self.all_lines()])
@property
def total_excl_tax(self):
"""
Return total line price excluding tax
"""
return self._get_total('line_price_excl_tax_incl_discounts')
@property
def total_tax(self):
"""Return total tax for a line"""
return self._get_total('line_tax')
@property
def total_incl_tax(self):
"""
Return total price inclusive of tax and discounts
"""
return self._get_total('line_price_incl_tax_incl_discounts')
@property
def total_incl_tax_excl_discounts(self):
"""
Return total price inclusive of tax but exclusive of discounts
"""
return self._get_total('line_price_incl_tax')
@property
def total_discount(self):
return self._get_total('discount_value')
@property
def offer_discounts(self):
"""
Return basket discounts from non-voucher sources. Does not include
shipping discounts.
"""
return self.offer_applications.offer_discounts
@property
def voucher_discounts(self):
"""
Return discounts from vouchers
"""
return self.offer_applications.voucher_discounts
@property
def shipping_discounts(self):
"""
Return discounts on shipping
"""
return self.offer_applications.shipping_discounts
@property
def post_order_actions(self):
"""
Return post-order actions from applied offers
"""
return self.offer_applications.post_order_actions
@property
def grouped_voucher_discounts(self):
"""
Return discounts from vouchers but grouped so that a voucher which
links to multiple offers is aggregated into one object.
"""
return self.offer_applications.grouped_voucher_discounts
@property
def total_excl_tax_excl_discounts(self):
"""
Return total price excluding tax and discounts
"""
return self._get_total('line_price_excl_tax')
@property
def num_lines(self):
"""Return number of lines"""
return self.lines.all().count()
@property
def num_items(self):
"""Return number of items"""
return reduce(
lambda num, line: num + line.quantity, self.lines.all(), 0)
@property
def num_items_without_discount(self):
num = 0
for line in self.all_lines():
num += line.quantity_without_discount
return num
@property
def num_items_with_discount(self):
num = 0
for line in self.all_lines():
num += line.quantity_with_discount
return num
@property
def time_before_submit(self):
if not self.date_submitted:
return None
return self.date_submitted - self.date_created
@property
def time_since_creation(self, test_datetime=None):
if not test_datetime:
test_datetime = now()
return test_datetime - self.date_created
@property
def contains_a_voucher(self):
if not self.id:
return False
return self.vouchers.all().count() > 0
@property
def is_submitted(self):
return self.status == self.SUBMITTED
@property
def can_be_edited(self):
"""
Test if a basket can be edited
"""
return self.status in self.editable_statuses
@property
def currency(self):
# Since all lines should have the same currency, return the currency of
# the first one found.
for line in self.all_lines():
return line.price_currency
# =============
# Query methods
# =============
def contains_voucher(self, code):
"""
Test whether the basket contains a voucher with a given code
"""
if self.id is None:
return False
try:
self.vouchers.get(code=code)
except ObjectDoesNotExist:
return False
else:
return True
def product_quantity(self, product):
"""
Return the quantity of a product in the basket
The basket can contain multiple lines with the same product, but
different options and stockrecords. Those quantities are summed up.
"""
matching_lines = self.lines.filter(product=product)
quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']
return quantity or 0
def line_quantity(self, product, stockrecord, options=None):
"""
Return the current quantity of a specific product and options
"""
ref = self._create_line_reference(product, stockrecord, options)
try:
return self.lines.get(line_reference=ref).quantity
except ObjectDoesNotExist:
return 0
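# Illustrative flow (sketch only; `basket`, `product` and `opt` are assumed
# objects from a concrete project, and a strategy is assumed to have been
# assigned by BasketMiddleware before any prices are accessed):
#
#   allowed, reason = basket.is_quantity_allowed(3)
#   if allowed:
#       basket.add_product(product, quantity=3,
#                          options=[{'option': opt, 'value': 'Large'}])
#   print basket.num_items, basket.total_incl_tax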
class AbstractLine(models.Model):
"""
A line of a basket (product and a quantity)
"""
basket = models.ForeignKey('basket.Basket', related_name='lines',
verbose_name=_("Basket"))
# This is to determine which products belong to the same line
# We can't just use product.id as you can have customised products
# which should be treated as separate lines. Set as a
# SlugField as it is included in the path for certain views.
line_reference = models.SlugField(
_("Line Reference"), max_length=128, db_index=True)
product = models.ForeignKey(
'catalogue.Product', related_name='basket_lines',
verbose_name=_("Product"))
# We store the stockrecord that should be used to fulfil this line. This
# shouldn't really be NULLable but we need to keep it so for backwards
# compatibility.
stockrecord = models.ForeignKey(
'partner.StockRecord', related_name='basket_lines',
null=True, blank=True)
quantity = models.PositiveIntegerField(_('Quantity'), default=1)
# We store the unit price incl tax of the product when it is first added to
# the basket. This allows us to tell if a product has changed price since
# a person first added it to their basket.
price_currency = models.CharField(
_("Currency"), max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY)
price_excl_tax = models.DecimalField(
_('Price excl. Tax'), decimal_places=2, max_digits=12,
null=True)
price_incl_tax = models.DecimalField(
_('Price incl. Tax'), decimal_places=2, max_digits=12, null=True)
# Track date of first addition
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
def __init__(self, *args, **kwargs):
super(AbstractLine, self).__init__(*args, **kwargs)
# Instance variables used to persist discount information
self._discount_excl_tax = Decimal('0.00')
self._discount_incl_tax = Decimal('0.00')
self._affected_quantity = 0
class Meta:
abstract = True
unique_together = ("basket", "line_reference")
verbose_name = _('Basket line')
verbose_name_plural = _('Basket lines')
def __unicode__(self):
return _(
u"Basket #%(basket_id)d, Product #%(product_id)d, quantity %(quantity)d") % {
'basket_id': self.basket.pk,
'product_id': self.product.pk,
'quantity': self.quantity}
def save(self, *args, **kwargs):
"""
Saves a line or deletes if the quantity is 0
"""
if not self.basket.can_be_edited:
raise PermissionDenied(
_("You cannot modify a %s basket") % (
self.basket.status.lower(),))
if self.quantity == 0:
return self.delete(*args, **kwargs)
return super(AbstractLine, self).save(*args, **kwargs)
# =============
# Offer methods
# =============
def clear_discount(self):
"""
Remove any discounts from this line.
"""
self._discount_excl_tax = Decimal('0.00')
self._discount_incl_tax = Decimal('0.00')
self._affected_quantity = 0
def discount(self, discount_value, affected_quantity, incl_tax=True):
"""
Apply a discount to this line
Note that it only makes sense to apply
"""
if incl_tax:
if self._discount_excl_tax > 0:
raise RuntimeError(
"Attempting to discount the tax-inclusive price of a line "
"when tax-exclusive discounts are already applied")
self._discount_incl_tax += discount_value
else:
if self._discount_incl_tax > 0:
raise RuntimeError(
"Attempting to discount the tax-exclusive price of a line "
"when tax-inclusive discounts are already applied")
self._discount_excl_tax += discount_value
self._affected_quantity += int(affected_quantity)
def consume(self, quantity):
"""
Mark all or part of the line as 'consumed'
Consumed items are no longer available to be used in offers.
"""
if quantity > self.quantity - self._affected_quantity:
inc = self.quantity - self._affected_quantity
else:
inc = quantity
self._affected_quantity += int(inc)
def get_price_breakdown(self):
"""
Return a breakdown of line prices after discounts have been applied.
Returns a list of (unit_price_incl_tx, unit_price_excl_tax, quantity)
tuples.
"""
if not self.is_tax_known:
raise RuntimeError("A price breakdown can only be determined "
"when taxes are known")
prices = []
if not self.has_discount:
prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,
self.quantity))
else:
# Need to split the discount among the affected quantity
# of products.
item_incl_tax_discount = (
self.discount_value / int(self._affected_quantity))
item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio
prices.append((self.unit_price_incl_tax - item_incl_tax_discount,
self.unit_price_excl_tax - item_excl_tax_discount,
self._affected_quantity))
if self.quantity_without_discount:
prices.append((self.unit_price_incl_tax,
self.unit_price_excl_tax,
self.quantity_without_discount))
return prices
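# Numeric sketch (illustrative values, not from the project): with unit
# prices of 12.00 incl. tax / 10.00 excl. tax, quantity 3 and a 6.00
# tax-inclusive discount spread over 2 affected items, the method above
# returns [(9.00, 7.50, 2), (12.00, 10.00, 1)] - the discounted pair first,
# then the undiscounted remainder.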
# =======
# Helpers
# =======
@property
def _tax_ratio(self):
if not self.unit_price_incl_tax:
return 0
return self.unit_price_excl_tax / self.unit_price_incl_tax
# ==========
# Properties
# ==========
@property
def has_discount(self):
return self.quantity > self.quantity_without_discount
@property
def quantity_with_discount(self):
return self._affected_quantity
@property
def quantity_without_discount(self):
return int(self.quantity - self._affected_quantity)
@property
def is_available_for_discount(self):
return self.quantity_without_discount > 0
@property
def discount_value(self):
# Only one of the incl- and excl- discounts should be non-zero
return max(self._discount_incl_tax, self._discount_excl_tax)
@property
def stockinfo(self):
"""
Return the stock/price info
"""
if not hasattr(self, '_info'):
# Cache the stockinfo (note that a strategy instance is assigned to
# each line by the basket in the all_lines method).
self._info = self.strategy.fetch(
self.product, self.stockrecord)
return self._info
@property
def is_tax_known(self):
if not hasattr(self, 'strategy'):
return False
return self.stockinfo.price.is_tax_known
@property
def unit_price_excl_tax(self):
return self.stockinfo.price.excl_tax
@property
def unit_price_incl_tax(self):
return self.stockinfo.price.incl_tax
@property
def unit_tax(self):
return self.stockinfo.price.tax
@property
def line_price_excl_tax(self):
return self.quantity * self.unit_price_excl_tax
@property
def line_price_excl_tax_incl_discounts(self):
if self._discount_excl_tax:
return self.line_price_excl_tax - self._discount_excl_tax
if self._discount_incl_tax:
# This is a tricky situation. We know the discount as calculated
# against tax inclusive prices but we need to guess how much of the
# discount applies to tax-exclusive prices. We do this by
# assuming a linear tax and scaling down the original discount.
return self.line_price_excl_tax - self._tax_ratio * self._discount_incl_tax
return self.line_price_excl_tax
@property
def line_price_incl_tax_incl_discounts(self):
# We use whichever discount value is set. If the discount value was
# calculated against the tax-exclusive prices, subtracting it from the
# tax-inclusive line price is only an approximation of the discounted
# price including tax.
return self.line_price_incl_tax - self.discount_value
@property
def line_tax(self):
return self.quantity * self.unit_tax
@property
def line_price_incl_tax(self):
return self.quantity * self.unit_price_incl_tax
@property
def description(self):
d = str(self.product)
ops = []
for attribute in self.attributes.all():
ops.append("%s = '%s'" % (attribute.option.name, attribute.value))
if ops:
d = "%s (%s)" % (d.decode('utf-8'), ", ".join(ops))
return d
def get_warning(self):
"""
Return a warning message about this basket line if one is applicable
This could be things like the price has changed
"""
if not self.stockrecord:
msg = u"'%(product)s' is no longer available"
return _(msg) % {'product': self.product.get_title()}
if not self.price_incl_tax:
return
if not self.stockinfo.price.is_tax_known:
return
# Compare current price to price when added to basket
current_price_incl_tax = self.stockinfo.price.incl_tax
if current_price_incl_tax > self.price_incl_tax:
msg = ("The price of '%(product)s' has increased from "
"%(old_price)s to %(new_price)s since you added it "
"to your basket")
return _(msg) % {
'product': self.product.get_title(),
'old_price': currency(self.price_incl_tax),
'new_price': currency(current_price_incl_tax)}
if current_price_incl_tax < self.price_incl_tax:
msg = ("The price of '%(product)s' has decreased from "
"%(old_price)s to %(new_price)s since you added it "
"to your basket")
return _(msg) % {
'product': self.product.get_title(),
'old_price': currency(self.price_incl_tax),
'new_price': currency(current_price_incl_tax)}
class AbstractLineAttribute(models.Model):
"""
An attribute of a basket line
"""
line = models.ForeignKey('basket.Line', related_name='attributes',
verbose_name=_("Line"))
option = models.ForeignKey('catalogue.Option', verbose_name=_("Option"))
value = models.CharField(_("Value"), max_length=255)
class Meta:
abstract = True
verbose_name = _('Line attribute')
verbose_name_plural = _('Line attributes')
|
bsd-3-clause
| 7,346,142,169,669,723,000
| 33.067857
| 89
| 0.587029
| false
| 4.337881
| false
| false
| false
|
chrislit/abydos
|
abydos/distance/_clement.py
|
1
|
4649
|
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._clement.
Clement similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Clement']
class Clement(_TokenDistance):
r"""Clement similarity.
For two sets X and Y and a population N, Clement similarity
:cite:`Clement:1976` is defined as
.. math::
sim_{Clement}(X, Y) =
\frac{|X \cap Y|}{|X|}\Big(1-\frac{|X|}{|N|}\Big) +
\frac{|(N \setminus X) \setminus Y|}{|N \setminus X|}
\Big(1-\frac{|N \setminus X|}{|N|}\Big)
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{Clement} =
\frac{a}{a+b}\Big(1 - \frac{a+b}{n}\Big) +
\frac{d}{c+d}\Big(1 - \frac{c+d}{n}\Big)
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize Clement instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(Clement, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def sim(self, src: str, tar: str) -> float:
"""Return the Clement similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Clement similarity
Examples
--------
>>> cmp = Clement()
>>> cmp.sim('cat', 'hat')
0.5025379382522239
>>> cmp.sim('Niall', 'Neil')
0.33840586363079933
>>> cmp.sim('aluminum', 'Catalan')
0.12119877280918714
>>> cmp.sim('ATCG', 'TAGC')
0.006336616803332366
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
n = self._population_unique_card()
score = 0.0
if a + b:
score += (a / (a + b)) * (1 - (a + b) / n)
if c + d:
score += (d / (c + d)) * (1 - (c + d) / n)
return score
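# Worked example (illustrative; assumes the default _TokenDistance alphabet,
# which gives a population of n = 784 for q=2 tokens): 'cat' and 'hat' share
# a = 2 bigrams, with b = 2 and c = 2 unique to each side, so d = 778 and
#   sim = 2/4 * (1 - 4/784) + 778/780 * (1 - 780/784) ~= 0.50254,
# matching the first doctest above.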
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-3.0
| -6,004,276,056,573,090,000
| 28.238994
| 78
| 0.562056
| false
| 3.933164
| false
| false
| false
|
thethythy/Mnemopwd
|
mnemopwd/client/corelayer/protocol/StateS33R.py
|
1
|
2965
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
State S33 : Deletion
"""
from ...util.funcutils import singleton
from .StateSCC import StateSCC
@singleton
class StateS33R(StateSCC):
"""State S33 : Deletion"""
def do(self, handler, data):
"""Action of the state S33R: user account deletion request"""
with handler.lock:
try:
# Challenge creation
echallenge = self.compute_challenge(handler, b"S33.7")
if echallenge:
# Encrypt login
elogin = handler.ephecc.encrypt(
handler.login, pubkey=handler.ephecc.get_pubkey())
# Compute then encrypt id
id = self.compute_client_id(handler.ms, handler.login)
eid = handler.ephecc.encrypt(
id, pubkey=handler.ephecc.get_pubkey())
# Send deletion request
msg = echallenge + b';DELETION;' + eid + b';' + elogin
handler.loop.call_soon_threadsafe(handler.transport.write, msg)
# Notify the handler a property has changed
handler.loop.run_in_executor(
None, handler.notify, "connection.state",
"User account deletion request")
except Exception as exc:
# Schedule a call to the exception handler
handler.loop.call_soon_threadsafe(handler.exception_handler, exc)
else:
handler.state = handler.states['33A'] # Next state
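# Wire-format sketch (for illustration only; actual field lengths depend on
# the ephemeral ECC keys in use). The request assembled above is a single
# byte string of the form:
#   <encrypted challenge> b';DELETION;' <encrypted client id> b';' <encrypted login>
# which the server-side S33 state is expected to parse before deleting the
# user account.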
|
bsd-2-clause
| 3,113,484,126,458,670,000
| 40.760563
| 83
| 0.651939
| false
| 4.526718
| false
| false
| false
|
jlane9/selenium_data_attributes
|
setup.py
|
1
|
1363
|
"""setup.py
.. codeauthor:: John Lane <jlane@fanthreesixty.com>
"""
from setuptools import setup, find_packages
from sda import __author__, __email__, __license__, __version__
setup(
name='sda',
version=__version__,
packages=find_packages(),
scripts=[],
description='A wrapper for Selenium. This library uses custom data attributes to accelerate '
'testing through the Selenium framework',
author=__author__,
author_email=__email__,
url='https://github.com/jlane9/selenium-data-attributes',
download_url='https://github.com/jlane9/selenium-data-attributes/tarball/{}'.format(__version__),
keywords='testing selenium qa web automation',
install_requires=['lxml', 'cssselect'],
license=__license__,
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing'])
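# Typical local usage (sketch; assumes a standard setuptools workflow):
#   pip install .            # install the 'sda' package from this checkout
#   python setup.py sdist    # build a source distribution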
|
mit
| 7,108,862,239,312,840,000
| 39.088235
| 101
| 0.598679
| false
| 4.589226
| false
| false
| false
|
AstroPrint/AstroBox
|
src/octoprint/timelapse.py
|
1
|
12285
|
# coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import logging
import os
import threading
import urllib
import time
import subprocess
import fnmatch
import datetime
import sys
import shutil
import octoprint.util as util
from octoprint.settings import settings
from octoprint.events import eventManager, Events
import sarge
# currently configured timelapse
current = None
def getFinishedTimelapses():
files = []
basedir = settings().getBaseFolder("timelapse")
for osFile in os.listdir(basedir):
if not fnmatch.fnmatch(osFile, "*.mpg"):
continue
statResult = os.stat(os.path.join(basedir, osFile))
files.append({
"name": osFile,
"size": util.getFormattedSize(statResult.st_size),
"bytes": statResult.st_size,
"date": util.getFormattedDateTime(datetime.datetime.fromtimestamp(statResult.st_ctime))
})
return files
validTimelapseTypes = ["off", "timed", "zchange"]
updateCallbacks = []
def registerCallback(callback):
if not callback in updateCallbacks:
updateCallbacks.append(callback)
def unregisterCallback(callback):
if callback in updateCallbacks:
updateCallbacks.remove(callback)
def notifyCallbacks(timelapse):
if timelapse is None:
config = None
else:
config = timelapse.configData()
for callback in updateCallbacks:
try: callback.sendTimelapseConfig(config)
except: pass
def configureTimelapse(config=None, persist=False):
global current
if config is None:
config = settings().get(["webcam", "timelapse"])
if current is not None:
current.unload()
type = config["type"]
postRoll = 0
if "postRoll" in config:
postRoll = config["postRoll"]
if type is None or "off" == type:
current = None
elif "zchange" == type:
current = ZTimelapse(postRoll=postRoll)
elif "timed" == type:
interval = 10
if "options" in config and "interval" in config["options"]:
interval = config["options"]["interval"]
current = TimedTimelapse(postRoll=postRoll, interval=interval)
notifyCallbacks(current)
if persist:
settings().set(["webcam", "timelapse"], config)
settings().save()
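# Example of the config dict consumed above (illustrative; the same structure
# is what settings() stores under ["webcam", "timelapse"]):
#   configureTimelapse({"type": "timed", "postRoll": 2,
#                       "options": {"interval": 10}})
#   configureTimelapse({"type": "zchange"})
#   configureTimelapse({"type": "off"}, persist=True)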
class Timelapse(object):
def __init__(self, postRoll=0):
self._logger = logging.getLogger(__name__)
self._imageNumber = None
self._inTimelapse = False
self._gcodeFile = None
self._postRoll = postRoll
self._postRollStart = None
self._onPostRollDone = None
self._captureDir = settings().getBaseFolder("timelapse_tmp")
self._movieDir = settings().getBaseFolder("timelapse")
self._snapshotUrl = settings().get(["webcam", "snapshot"])
self._fps = 25
self._renderThread = None
self._captureMutex = threading.Lock()
# subscribe events
eventManager().subscribe(Events.PRINT_STARTED, self.onPrintStarted)
eventManager().subscribe(Events.PRINT_FAILED, self.onPrintDone)
eventManager().subscribe(Events.PRINT_DONE, self.onPrintDone)
eventManager().subscribe(Events.PRINT_RESUMED, self.onPrintResumed)
for (event, callback) in self.eventSubscriptions():
eventManager().subscribe(event, callback)
def postRoll(self):
return self._postRoll
def unload(self):
if self._inTimelapse:
self.stopTimelapse(doCreateMovie=False)
# unsubscribe events
eventManager().unsubscribe(Events.PRINT_STARTED, self.onPrintStarted)
eventManager().unsubscribe(Events.PRINT_FAILED, self.onPrintDone)
eventManager().unsubscribe(Events.PRINT_DONE, self.onPrintDone)
eventManager().unsubscribe(Events.PRINT_RESUMED, self.onPrintResumed)
for (event, callback) in self.eventSubscriptions():
eventManager().unsubscribe(event, callback)
def onPrintStarted(self, event, payload):
"""
Override this to perform additional actions upon start of a print job.
"""
self.startTimelapse(payload["file"]['printFileName'])
def onPrintDone(self, event, payload):
"""
Override this to perform additional actions upon the stop of a print job.
"""
self.stopTimelapse(success=(event==Events.PRINT_DONE))
def onPrintResumed(self, event, payload):
"""
Override this to perform additional actions upon the resuming of a print job.
"""
if not self._inTimelapse:
self.startTimelapse(payload["file"])
def eventSubscriptions(self):
"""
Override this method to subscribe to additional events by returning an array of (event, callback) tuples.
Events that are already subscribed:
* PrintStarted - self.onPrintStarted
* PrintResumed - self.onPrintResumed
* PrintFailed - self.onPrintDone
* PrintDone - self.onPrintDone
"""
return []
def configData(self):
"""
Override this method to return the current timelapse configuration data. The data should have the following
form:
type: "<type of timelapse>",
options: { <additional options> }
"""
return None
def startTimelapse(self, gcodeFile):
self._logger.debug("Starting timelapse for %s" % gcodeFile)
self.cleanCaptureDir()
self._imageNumber = 0
self._inTimelapse = True
self._gcodeFile = os.path.basename(gcodeFile)
def stopTimelapse(self, doCreateMovie=True, success=True):
self._logger.debug("Stopping timelapse")
self._inTimelapse = False
def resetImageNumber():
self._imageNumber = None
def createMovie():
self._renderThread = threading.Thread(target=self._createMovie, kwargs={"success": success})
self._renderThread.daemon = True
self._renderThread.start()
def resetAndCreate():
resetImageNumber()
createMovie()
if self._postRoll > 0:
self._postRollStart = time.time()
if doCreateMovie:
self._onPostRollDone = resetAndCreate
else:
self._onPostRollDone = resetImageNumber
self.processPostRoll()
else:
self._postRollStart = None
if doCreateMovie:
resetAndCreate()
else:
resetImageNumber()
def processPostRoll(self):
pass
def captureImage(self):
if self._captureDir is None:
self._logger.warn("Cannot capture image, capture directory is unset")
return
with self._captureMutex:
filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber))
self._imageNumber += 1
self._logger.debug("Capturing image to %s" % filename)
captureThread = threading.Thread(target=self._captureWorker, kwargs={"filename": filename})
captureThread.daemon = True
captureThread.start()
return filename
def _captureWorker(self, filename):
eventManager().fire(Events.CAPTURE_START, {"file": filename})
try:
urllib.urlretrieve(self._snapshotUrl, filename)
self._logger.debug("Image %s captured from %s" % (filename, self._snapshotUrl))
except:
self._logger.exception("Could not capture image %s from %s, decreasing image counter again" % (filename, self._snapshotUrl))
if self._imageNumber is not None and self._imageNumber > 0:
self._imageNumber -= 1
eventManager().fire(Events.CAPTURE_DONE, {"file": filename})
def _createMovie(self, success=True):
ffmpeg = settings().get(["webcam", "ffmpeg"])
bitrate = settings().get(["webcam", "bitrate"])
if ffmpeg is None or bitrate is None:
self._logger.warn("Cannot create movie, path to ffmpeg or desired bitrate is unset")
return
input = os.path.join(self._captureDir, "tmp_%05d.jpg")
if success:
output = os.path.join(self._movieDir, "%s_%s.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S")))
else:
output = os.path.join(self._movieDir, "%s_%s-failed.mpg" % (os.path.splitext(self._gcodeFile)[0], time.strftime("%Y%m%d%H%M%S")))
# prepare ffmpeg command
command = [
ffmpeg, '-loglevel', 'error', '-i', input, '-vcodec', 'mpeg2video', '-pix_fmt', 'yuv420p', '-r', str(self._fps), '-y', '-b:v', bitrate,
'-f', 'vob']
filters = []
# flip video if configured
if settings().getBoolean(["webcam", "flipH"]):
filters.append('hflip')
if settings().getBoolean(["webcam", "flipV"]):
filters.append('vflip')
# add watermark if configured
watermarkFilter = None
if settings().getBoolean(["webcam", "watermark"]):
watermark = os.path.join(os.path.dirname(__file__), "static", "img", "watermark.png")
if sys.platform == "win32":
# Because ffmpeg hiccups on windows' drive letters and backslashes we have to give the watermark
# path a special treatment. Yeah, I couldn't believe it either...
watermark = watermark.replace("\\", "/").replace(":", "\\\\:")
watermarkFilter = "movie=%s [wm]; [%%(inputName)s][wm] overlay=10:main_h-overlay_h-10" % watermark
filterstring = None
if len(filters) > 0:
if watermarkFilter is not None:
filterstring = "[in] %s [postprocessed]; %s [out]" % (",".join(filters), watermarkFilter % {"inputName": "postprocessed"})
else:
filterstring = "[in] %s [out]" % ",".join(filters)
elif watermarkFilter is not None:
filterstring = watermarkFilter % {"inputName": "in"} + " [out]"
if filterstring is not None:
self._logger.debug("Applying videofilter chain: %s" % filterstring)
command.extend(["-vf", sarge.shell_quote(filterstring)])
# finalize command with output file
self._logger.debug("Rendering movie to %s" % output)
command.append(output)
eventManager().fire(Events.MOVIE_RENDERING, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)})
command_str = " ".join(command)
self._logger.debug("Executing command: %s" % command_str)
p = sarge.run(command_str, stderr=sarge.Capture())
if p.returncode == 0:
eventManager().fire(Events.MOVIE_DONE, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output)})
else:
returncode = p.returncode
stderr_text = p.stderr.text
self._logger.warn("Could not render movie, got return code %r: %s" % (returncode, stderr_text))
eventManager().fire(Events.MOVIE_FAILED, {"gcode": self._gcodeFile, "movie": output, "movie_basename": os.path.basename(output), "returncode": returncode, "error": stderr_text})
def cleanCaptureDir(self):
if not os.path.isdir(self._captureDir):
self._logger.warn("Cannot clean capture directory, it is unset")
return
for filename in os.listdir(self._captureDir):
if not fnmatch.fnmatch(filename, "*.jpg"):
continue
os.remove(os.path.join(self._captureDir, filename))
class ZTimelapse(Timelapse):
def __init__(self, postRoll=0):
Timelapse.__init__(self, postRoll=postRoll)
self._logger.debug("ZTimelapse initialized")
def eventSubscriptions(self):
return [
("ZChange", self._onZChange)
]
def configData(self):
return {
"type": "zchange"
}
def processPostRoll(self):
Timelapse.processPostRoll(self)
filename = os.path.join(self._captureDir, "tmp_%05d.jpg" % self._imageNumber)
self._imageNumber += 1
with self._captureMutex:
self._captureWorker(filename)
for i in range(self._postRoll * self._fps):
newFile = os.path.join(self._captureDir, "tmp_%05d.jpg" % (self._imageNumber))
self._imageNumber += 1
shutil.copyfile(filename, newFile)
if self._onPostRollDone is not None:
self._onPostRollDone()
def _onZChange(self, event, payload):
self.captureImage()
class TimedTimelapse(Timelapse):
def __init__(self, postRoll=0, interval=1):
Timelapse.__init__(self, postRoll=postRoll)
self._interval = interval
if self._interval < 1:
self._interval = 1 # force minimum interval of 1s
self._timerThread = None
self._logger.debug("TimedTimelapse initialized")
def interval(self):
return self._interval
def configData(self):
return {
"type": "timed",
"options": {
"interval": self._interval
}
}
def onPrintStarted(self, event, payload):
Timelapse.onPrintStarted(self, event, payload)
if self._timerThread is not None:
return
self._timerThread = threading.Thread(target=self._timerWorker)
self._timerThread.daemon = True
self._timerThread.start()
def onPrintDone(self, event, payload):
Timelapse.onPrintDone(self, event, payload)
self._timerThread = None
def _timerWorker(self):
self._logger.debug("Starting timer for interval based timelapse")
while self._inTimelapse or (self._postRollStart and time.time() - self._postRollStart <= self._postRoll * self._fps):
self.captureImage()
time.sleep(self._interval)
if self._postRollStart is not None and self._onPostRollDone is not None:
self._onPostRollDone()
self._postRollStart = None
|
agpl-3.0
| 7,949,570,891,765,914,000
| 29.403465
| 180
| 0.707726
| false
| 3.272848
| true
| false
| false
|
davidwhogg/HoneyComb
|
superpgram/code/header_time.py
|
1
|
1992
|
import numpy as np
import pyfits
import glob
import matplotlib.pyplot as plt
def real_footprint(t):
"""
# `real_footprint`
Takes real Kepler (BJD) time values for a certain target and returns
estimates of the starts, stops and centres
"""
dt = 0.02043359821692 # interval between observations (days)
stops = t
starts = t - dt
centres = t - .5*dt
return starts, stops, centres
def real_footprint_sc(t):
"""
# `real_footprint_sc`
Takes real Kepler (BJD) short-cadence time values for a certain target and
returns estimates of the starts, stops and centres
"""
dt = 6.81119940564e-4 # interval between observations (days) (sc)
stops = t
starts = t - dt
centres = t - .5*dt
return starts, stops, centres
def bjd2utc(t):
"""
# `bjd2utc`
Takes real Kepler (BJD) time values for a certain target.
Returns the spacecraft-UTC times (in days).
A is an array of coefficients for a sinusoid + linear trend, fit to the
timing data of 491 asteroseismic targets that are randomly distributed on
the CCD.
"""
A = np.genfromtxt("A.txt").T
w = 2*np.pi/372.5 # angular frequency (days-1)
return t + A[0]*np.sin(w*t) + A[1]*np.cos(w*t) + A[2]*t + A[3]
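# Sketch of the correction applied above (coefficients A0..A3 come from
# A.txt; the period is assumed to be the spacecraft's ~372.5-day orbit):
#   utc(t) = t + A0*sin(w*t) + A1*cos(w*t) + A2*t + A3, with w = 2*pi/372.5
# i.e. a sinusoid at the orbital period plus a small linear drift.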
if __name__ == "__main__":
# Test on a real target
D = "/Users/angusr/angusr/data2"
kid = "7341231" # a kepler target with lc and sc data chosen at `random'
fnames = []
qs = range(17)
x = []
for q in qs:
fnames.append(glob.glob("%s/Q%s_public/kplr%s-*_llc.fits"
% (D, q, kid.zfill(9)))[0])
# load test fits file
for fname in fnames:
hdulist = pyfits.open(fname)
tbdata = hdulist[1].data
x.extend(tbdata["TIME"])
# convert BJDs to UTCs
x = np.array(x) + 2454833 # days
utc = bjd2utc(x)
# plot correction
plt.clf()
plt.plot(x, utc, "k.")
plt.xlabel("BJD")
plt.ylabel("BJD-UTC (days)")
plt.savefig("demonstrate")
|
mit
| -7,558,977,120,474,102,000
| 25.918919
| 77
| 0.603916
| false
| 3.083591
| false
| false
| false
|
LRGH/amoco
|
amoco/arch/v850/env.py
|
1
|
3017
|
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2018 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# import expressions:
from amoco.cas.expressions import *
# registers :
# -----------
# main reg set:
R = [reg("r%d" % r, 32) for r in range(32)]
with is_reg_flags:
PSW = reg("psw", 32) # program-status word
Z = slc(PSW, 0, 1, "z") # Zero
S = slc(PSW, 1, 1, "s") # Sign
OV = slc(PSW, 2, 1, "ov") # Overflow
CY = slc(PSW, 3, 1, "cy") # Carry
SAT = slc(PSW, 4, 1, "sat") # Saturation
ID = slc(PSW, 5, 1, "id") # EI exception (TRAP)
EP = slc(PSW, 6, 1, "ep") # exception type (0: interrupt, 1:other)
NP = slc(PSW, 7, 1, "np") # FE exception
IMP = slc(
PSW, 16, 1, "imp"
) # instruction memory protection (0: trusted, 1: not trusted)
DMP = slc(PSW, 17, 1, "dmp") # data memory protection (0: trusted, 1: not trusted)
NPV = slc(PSW, 18, 1, "npv") # non-protected value (0: trusted, 1: not trusted)
with is_reg_pc:
pc = reg("pc", 16)
with is_reg_stack:
sp = reg("sp", 32) # stack ptr
R[0] = cst(0, 32).to_sym("zero")
R[3] = sp
R[4] = reg("gp", 32) # global variable ptr
R[5] = reg("tp", 32) # text area ptr
R[30] = reg("ep", 32) # array/struct base ptr
R[31] = reg("lp", 32) # link ptr
# system registers:
EIPC = reg("eipc", 32)
EIPSW = reg("eipsw", 32)
FEPC = reg("fepc", 32)
FEPSW = reg("fepsw", 32)
ECR = reg("ecr", 32) # exception cause
SCCFG = reg("sccfg", 32) # SYSCAL op setting
SCBP = reg("scbp", 32) # SYSCAL base ptr
EIIC = reg("eiic", 32)
FEIC = reg("feic", 32)
DBIC = reg("dbic", 32)
CTPC = reg("ctpc", 32)
CTPSW = reg("ctpsw", 32)
DBPC = reg("dbpc", 32)
DBPSW = reg("dbpsw", 32)
CTBP = reg("ctbp", 32) # CALLT base ptr
EIWR = reg("eiwr", 32)
FEWR = reg("fewr", 32)
DBWR = reg("dbwr", 32)
BSEL = reg("bsel", 32) # register bank select
BNK = slc(BSEL, 0, 8, "bnk")
GRP = slc(BSEL, 8, 8, "grp")
CONDITION_V = 0b0000 # overflow (OV == 1)
CONDITION_NV = 0b1000 # no overflow (OV == 0)
CONDITION_C = 0b0001 # carry set, i.e. < (unsigned)
CONDITION_NC = 0b1001 # carry clear, i.e. >= (unsigned)
CONDITION_Z = 0b0010 # zero, i.e. ==
CONDITION_NZ = 0b1010 # not zero, i.e. !=
CONDITION_NH = 0b0011 # not higher, i.e. <= (unsigned)
CONDITION_H = 0b1011 # higher, i.e. > (unsigned)
CONDITION_S = 0b0100 # negative
CONDITION_NS = 0b1100 # positive or zero
CONDITION_T = 0b0101 # always true
CONDITION_SA = 0b1101 # saturated
CONDITION_LT = 0b0110 # < (signed)
CONDITION_GE = 0b1110 # >= (signed)
CONDITION_LE = 0b0111 # <= (signed)
CONDITION_GT = 0b1111 # > (signed)
CONDITION = {
CONDITION_V: ("v", OV == 1),
CONDITION_NV: ("nv", OV == 0),
CONDITION_C: ("c", CY == 1),
CONDITION_NC: ("nc", CY == 0),
CONDITION_Z: ("z", Z == 1),
CONDITION_NZ: ("nz", Z == 0),
CONDITION_NH: ("nh", (CY | Z) == 1),
CONDITION_H: ("h", (CY | Z) == 0),
CONDITION_S: ("neg", S == 1),
CONDITION_NS: ("pos", S == 0),
CONDITION_T: ("", bit1),
CONDITION_SA: ("sat", SAT == 1),
CONDITION_LT: ("lt", (S ^ OV) == 1),
CONDITION_GE: ("ge", (S ^ OV) == 0),
CONDITION_LE: ("le", ((S ^ OV) | Z) == 1),
CONDITION_GT: ("gt", ((S ^ OV) | Z) == 0),
}
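# Usage sketch (illustrative): a decoder can map a 4-bit condition field to
# its mnemonic suffix and symbolic test expression, e.g.
#   suffix, test = CONDITION[CONDITION_LE]  # -> ("le", ((S ^ OV) | Z) == 1)
# where `test` is an amoco expression, not a plain boolean.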
|
gpl-2.0
| -6,677,247,967,779,939,000
| 27.462264
| 87
| 0.551541
| false
| 2.304813
| false
| false
| false
|
arthurdk/gk-analysis
|
GKVisualizer.py
|
1
|
7581
|
import plotly
from plotly.graph_objs import Scatter, Layout, Bar, Figure
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import plotly.graph_objs as go
'''
class VisualizationStrategy:
def __init__(self):
pass
# Homemade enumeration
Plot, CSV, ASCII = range(3)
'''
import random
class GKVisualizer:
def __init__(self, reviewers_filtering, group_by_option='nothing',
rating_filters=[],
word_cloud_background="white",
color_scheme=None):
self.reviewers_filtering = reviewers_filtering
self.group_by = group_by_option
self.rating_filters = rating_filters
self.word_cloud_background = word_cloud_background
self.word_cloud_color_scheme = color_scheme
@staticmethod
def _grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
"""
Credit to word_cloud project on github
:param word:
:param font_size:
:param position:
:param orientation:
:param random_state:
:param kwargs:
:return:
"""
return "hsl(0, 0%%, %d%%)" % random.randint(60, 100)
def word_cloud(self, frequencies, mask=None):
if mask is not None:
word_cloud = WordCloud(background_color=self.word_cloud_background,
width=1200,
height=1000,
mask=mask
).generate_from_frequencies(frequencies)
else:
word_cloud = WordCloud(background_color=self.word_cloud_background,
width=1200,
height=1000
).generate_from_frequencies(frequencies)
if self.word_cloud_color_scheme is not None:
plt.imshow(word_cloud.recolor(color_func=GKVisualizer._grey_color_func, random_state=3))
else:
plt.imshow(word_cloud)
plt.axis('off')
plt.show()
@staticmethod
def display_gauge(labels, target, title):
value = 100.0 / len(labels)
values = [value] * len(labels)
base_chart = {
"values": values,
"domain": {"x": [0, .48]},
"marker": {
"line": {
"width": 1
}
},
"name": "Gauge",
"hole": .4,
"type": "pie",
"direction": "clockwise",
"showlegend": False,
"hoverinfo": "none",
"textinfo": "none",
"textposition": "outside"
}
meter_chart = {
"values": values,
"labels": labels,
'textfont': {
"size": 22,
"color": "white"
},
"domain": {"x": [0, 0.48]},
"name": "Gauge",
"hole": .3,
"type": "pie",
"direction": "clockwise",
"showlegend": False,
"textinfo": "label",
"textposition": "inside",
"hoverinfo": "none"
}
layout = {
'title': title,
'xaxis': {
'showticklabels': False,
'autotick': False,
'showgrid': False,
'zeroline': False,
},
'yaxis': {
'showticklabels': False,
'autotick': False,
'showgrid': False,
'zeroline': False,
},
'annotations': [
{
'xref': 'paper',
'yref': 'paper',
'x': 0.23,
'y': 0.5,
'text': target,
'font': {
"size": 22,
"color": "black"
},
'showarrow': False
}
]
}
# apparently we don't want the boundary now
base_chart['marker']['line']['width'] = 0
fig = {"data": [base_chart, meter_chart],
"layout": layout}
plotly.offline.plot(fig)
@staticmethod
def _determine_min_max(reviews, min_date, max_date):
for review in reviews:
if min_date > review.date:
min_date = review.date
if max_date < review.date:
max_date = review.date
return min_date, max_date
def determine_date(self, reviews):
if self.group_by != 'nothing':
max_date = reviews[0][0].date
min_date = reviews[0][0].date
for group in reviews:
min_date, max_date = self._determine_min_max(reviews=group, min_date=min_date, max_date=max_date)
else:
max_date = reviews[0].date
min_date = reviews[0].date
min_date, max_date = self._determine_min_max(reviews=reviews, min_date=min_date, max_date=max_date)
return min_date, max_date
# TODO: optimize so this is not called every time
def get_dated_title(self, title, grouped_reviews):
"""
Return the title with a proper date
:param title:
:param grouped_reviews:
:return:
"""
min_date, max_date = self.determine_date(grouped_reviews)
if min_date.year == max_date.year:
title += " (%d)" % max_date.year
else:
title += " (%d to %d)" % (min_date.year, max_date.year)
return title
@staticmethod
def get_named_title(title, reviewers):
if len(reviewers) > 0:
title += " (" + ", ".join(reviewers) + ") "
return title
def get_rating_filtered_title(self, title):
for opt, rating in self.rating_filters:
title += " (" + opt + " " + str(rating) + ")"
return title
def group_plot(self, data, labels, title, ylabel):
figure = {
"data": [
Bar(x=labels, y=data)
],
"layout": Layout(
title=title,
xaxis=dict(
title=self.group_by
),
yaxis=dict(
title=ylabel
),
)
}
plotly.offline.plot(figure)
def double_group_plot(self, gk_grouped_reviews, y, ylabel, labels, title):
traces = []
for idx in range(len(labels)):
traces.append(go.Bar(
x=gk_grouped_reviews.labels,
y=y[:, idx], # value for the second level label
name=labels[idx] # second level label
))
layout = go.Layout(
title=title,
barmode='group',
xaxis=dict(
title=self.group_by
),
yaxis=dict(
title=ylabel
),
)
fig = go.Figure(data=traces, layout=layout)
plotly.offline.plot(fig)
@staticmethod
def scatter(x, y, title, ylabel):
layout = dict(title=title,
yaxis=dict(
title=ylabel)
,
xaxis=dict(
title="Date")
)
# Create a trace
trace = go.Scatter(
x=x,
y=y,
mode='markers'
)
data = [trace]
fig = dict(data=data, layout=layout)
plotly.offline.plot(fig)
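# Minimal usage sketch (illustrative; the argument values are assumptions, and
# `frequencies` must be in whatever form WordCloud.generate_from_frequencies
# expects - a dict of word -> count in recent wordcloud releases):
#   viz = GKVisualizer(reviewers_filtering=[], group_by_option='reviewer')
#   viz.word_cloud({"game": 42, "review": 17, "score": 9})
#   GKVisualizer.scatter(x=dates, y=ratings, title="Ratings over time",
#                        ylabel="Rating")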
|
mit
| -1,554,547,537,877,476,000
| 28.729412
| 113
| 0.460361
| false
| 4.254209
| false
| false
| false
|
HuStmpHrrr/gjms
|
clickonce/publish.py
|
1
|
3780
|
from __future__ import print_function
import subprocess
import os
import sys
import shutil
import datetime
import distutils.dir_util
if sys.version_info < (3,):
input = raw_input
str = unicode
pwd = os.getcwd()
appver_file = r'.\AppVer'
target_shares = {
'release': [],
'test' : [],
'dev' : []
}
# it needs this transformation because msbuild does a direct string concatenation instead of a path join.
def unify_path(p):
if isinstance(p, str):
p = p if p.endswith(os.path.sep) else p+os.path.sep
return (p, p)
else:
return p
target_shares = {k: [unify_path(p) for p in v] for k, v in target_shares.items()}
output_dir = r'bin\publish'
publish_dir = r'bin\publishapp.publish'
msbuild_folder = r'%ProgramFiles%\MSBuild\12.0\bin\amd64' \
if os.path.exists(r'%s\MSBuild\12.0\bin\amd64' % os.environ['PROGRAMFILES'])\
else r'%ProgramFiles(x86)%\MSBuild\12.0\bin\amd64'
def get_appver():
if not os.path.exists(appver_file):
with open(appver_file, 'w') as fd:
fd.write('1.0.0.0')
return '1.0.0.0'
with open(appver_file) as fd:
return fd.readline().strip()
def incr_appver(ver):
vers = ver.split('.')
vers[-1] = str(int(vers[-1]) + 1)
return '.'.join(vers)
def set_appver(ver):
with open(appver_file, 'w') as fd:
fd.write(ver)
def get_cmd(target, updateurl, ver, env):
template = r'"{0}\msbuild" /t:clean;publish /property:OutputPath={1},PublishUrl={2},InstallUrl={2},UpdateUrl={3},ApplicationVersion={4},MinimumRequiredVersion={4},AssemblyName="{5}"'
cmd = template.format(msbuild_folder, output_dir, target, updateurl, ver, 'NST System Configurator '+env)
return cmd
if __name__ == '__main__':
error = {}
print('current python implementation version is', sys.version)
print('currently working in: {}'.format(pwd))
print('please make sure this script runs directly under the project folder.')
env = input('build environment({}): '.format(', '.join(sorted(target_shares.keys()))))
while env not in target_shares:
print("nonexisting environment: {}".format(env), file=sys.stderr)
env = input('build environment({}): '.format(', '.join(sorted(target_shares.keys()))))
ver = incr_appver(get_appver())
for i, p in enumerate(target_shares[env]):
        target, updateurl = p
cmd = get_cmd(target, updateurl, ver, env+str(i))
print('executing {}'.format(cmd))
print('----------------------------------')
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
with proc.stdout:
for l in proc.stdout:
print(l.strip().decode('utf-8'))
proc.terminate()
print('----------------------------------')
if proc.returncode == 0:
try:
distutils.dir_util.copy_tree(publish_dir, target)
except Exception as e:
error[target] = e
print('error occurred: {}'.format(str(e)), file=sys.stderr)
distutils.dir_util.copy_tree(publish_dir, r'bin\backup' + '\\' + str(i))
else:
print("error: {}".format(proc.returncode), file=sys.stderr)
        print()
if len(error) != 0:
print('Error occurred:', file=sys.stderr)
for k, e in error.items():
print('{}: {}'.format(k, str(e)), file=sys.stderr)
print('has backed up the folder.', file=sys.stderr)
try:
set_appver(ver)
except IOError as e:
print("failed to write to file: {}".format(str(e)), file=sys.stderr)
print('next application version will be {}.'.format(incr_appver(ver)), file=sys.stderr)
input('press enter to continue...')
|
lgpl-2.1
| 2,146,401,426,755,421,700
| 34.327103
| 186
| 0.589153
| false
| 3.513011
| false
| false
| false
|
piappl/robrex_mapping
|
img_tools/scripts/trajectory_capture.py
|
1
|
1683
|
#!/usr/bin/env python
##
# @file trajectory_capture.py
# @author Artur Wilkowski <ArturWilkowski@piap.pl>
#
# @section LICENSE
#
# Copyright (C) 2015, Industrial Research Institute for Automation and Measurements
# Security and Defence Systems Division <http://www.piap.pl>
import roslib
roslib.load_manifest('img_tools')
import sys
import rospy
from nav_msgs.msg import Path
import yaml
import os
class trajectory_capture:
def pathcallback(self, data):
self.mapper_path = data
def savepath(self, filename):
f = open(filename, 'w')
for pose in self.mapper_path.poses:
f.write(str(pose.header.seq) + ' ' + str(pose.pose.position.x) + ' ' + str(pose.pose.position.y) + ' ' + str(pose.pose.position.z) + ' ' + \
str(pose.pose.orientation.x) + ' ' + str(pose.pose.orientation.y) + ' ' + str(pose.pose.orientation.z) + ' ' + str(pose.pose.orientation.w) + '\n')
#print pose.header.seq
#print pose.pose.position.x
#print pose.pose.position.y
#print pose.pose.position.z
#print pose.pose.orientation.x
#print pose.pose.orientation.y
#print pose.pose.orientation.z
#print pose.pose.orientation.w
f.close()
def __init__(self):
self.mapper_path_sub = rospy.Subscriber('mapper_path', Path, self.pathcallback)
self.mapper_path = None
def main(args):
rospy.init_node('trajectory_capture', anonymous=True)
ic = trajectory_capture()
rospy.spin()
    print 'Saving odompath.txt on exit.'
ic.savepath('odompath.txt')
if __name__ == '__main__':
main(sys.argv)
|
gpl-2.0
| 3,916,604,659,894,667,000
| 30.166667
| 167
| 0.619727
| false
| 3.236538
| false
| false
| false
|
dit/dit
|
dit/inference/counts.py
|
1
|
6393
|
"""
Non-cython methods for getting counts and distributions from data.
"""
import numpy as np
__all__ = (
'counts_from_data',
'distribution_from_data',
'get_counts',
)
try: # cython
from .pycounts import counts_from_data, distribution_from_data
except ImportError: # no cython
from boltons.iterutils import windowed_iter
from collections import Counter, defaultdict
from itertools import product
from .. import modify_outcomes
from ..exceptions import ditException
def counts_from_data(data, hLength, fLength, marginals=True, alphabet=None, standardize=True):
"""
Returns conditional counts from `data`.
To obtain counts for joint distribution only, use fLength=0.
Parameters
----------
data : NumPy array
The data used to calculate morphs. Note: `data` cannot be a generator.
Also, if standardize is True, then data can be any indexable iterable,
such as a list or tuple.
hLength : int
        The maximum history word length used to calculate morphs.
fLength : int
The length of future words that defines the morph.
marginals : bool
If True, then the morphs for all histories words from L=0 to L=hLength
are calculated. If False, only histories of length L=hLength are
calculated.
alphabet : list
The alphabet to use when creating the morphs. If `None`, then one is
obtained from `data`. If not `None`, then the provided alphabet
supplements what appears in the data. So the data is always scanned
through in order to get the proper alphabet.
standardize : bool
The algorithm requires that the symbols in data be standardized to
a canonical alphabet consisting of integers from 0 to k-1, where k
is the alphabet size. If `data` is already standard, then an extra
pass through the data can be avoided by setting `standardize` to
`False`, but note: if `standardize` is False, then data MUST be a
NumPy array.
Returns
-------
histories : list
A list of observed histories, corresponding to the rows in `cCounts`.
cCounts : NumPy array
A NumPy array representing conditional counts. The rows correspond to
the observed histories, so this is sparse. The number of rows in this
array cannot be known in advance, but the number of columns will be
equal to the alphabet size raised to the `fLength` power.
hCounts : NumPy array
A 1D array representing the count of each history word.
alphabet : tuple
The ordered tuple representing the alphabet of the data. If `None`,
        then one is created from the data.
Notes
-----
This requires three complete passes through the data. One to obtain
the full alphabet. Another to standardize the data. A final pass to
obtain the counts.
This is implemented densely. So during the course of the algorithm,
we work with a large array containing a row for each possible history.
Only the rows corresponding to observed histories are returned.
"""
try:
data = list(map(tuple, data))
except TypeError:
pass
counts = Counter(windowed_iter(data, hLength + fLength))
cond_counts = defaultdict(lambda: defaultdict(int))
for word, count in counts.items():
cond_counts[word[:hLength]][word[hLength:]] += count
histories = sorted(counts.keys())
alphabet = set(alphabet) if alphabet is not None else set()
alphabet = tuple(sorted(alphabet.union(*[set(hist) for hist in histories])))
cCounts = np.empty((len(histories), len(alphabet)**fLength))
for i, hist in enumerate(histories):
for j, future in enumerate(product(alphabet, repeat=fLength)):
cCounts[i, j] = cond_counts[hist][future]
hCounts = cCounts.sum(axis=1)
return histories, cCounts, hCounts, alphabet
def distribution_from_data(d, L, trim=True, base=None):
"""
Returns a distribution over words of length `L` from `d`.
The returned distribution is the naive estimate of the distribution,
which assigns probabilities equal to the number of times a particular
word appeared in the data divided by the total number of times a word
could have appeared in the data.
Roughly, it corresponds to the stationary distribution of a maximum
likelihood estimate of the transition matrix of an (L-1)th order Markov
chain.
Parameters
----------
d : list
A list of symbols to be converted into a distribution.
L : integer
The length of the words for the distribution.
trim : bool
If true, then words with zero probability are trimmed from the
distribution.
base : int or string
The desired base of the returned distribution. If `None`, then the
value of `dit.ditParams['base']` is used.
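    Examples
    --------
    A small illustrative run (the naive estimate on a toy sequence); output is
    not asserted here because the exact Distribution repr may vary:

    >>> d = distribution_from_data([0, 1, 0, 1, 0], L=1)   # doctest: +SKIP
    >>> sorted(zip(d.outcomes, d.pmf))                      # doctest: +SKIP
    [(0, 0.6), (1, 0.4)]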
"""
from dit import ditParams, Distribution
try:
d = list(map(tuple, d))
except TypeError:
pass
if base is None:
base = ditParams['base']
words, _, counts, _ = counts_from_data(d, L, 0)
# We turn the counts to probabilities
pmf = counts / counts.sum()
dist = Distribution(words, pmf, trim=trim)
dist.set_base(base)
if L == 1:
try:
dist = modify_outcomes(dist, lambda o: o[0])
except ditException:
pass
return dist
def get_counts(data, length):
"""
Count the occurrences of all words of `length` in `data`.
Parameters
----------
data : iterable
The sequence of samples
length : int
The length to group samples into.
Returns
-------
counts : np.array
Array with the count values.
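    Examples
    --------
    A small illustrative run (counts of the two distinct length-2 words in a toy
    sequence); skipped because exact array formatting varies across NumPy versions:

    >>> get_counts([0, 1, 0, 1, 0], 2)   # doctest: +SKIP
    array([2., 2.])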
"""
hists, _, counts, _ = counts_from_data(data, length, 0)
mask = np.array([len(h) == length for h in hists])
counts = counts[mask]
return counts
|
bsd-3-clause
| -8,556,384,628,051,415,000
| 33.556757
| 98
| 0.613796
| false
| 4.582796
| false
| false
| false
|
alex-am/pyalgo
|
pyalgo/play/sum.py
|
1
|
1135
|
# Given a list of positive integers S = [s_i] and a positive integer t
# find if t can be written as the sum of a subset of elements of S
import numpy as np
def is_sum(t, S):
    # Subset sum is NP-complete in general; this dynamic programme is
    # pseudo-polynomial, roughly O(n**2 * t) (brute force would be O(2**n)).
    S = list(filter(lambda x: x > 0, S))  # keep only positive entries
n = len(S)
    m = np.zeros((n, t), dtype=int)  # np.int is deprecated/removed in recent NumPy
# first line
for s in range(0, t):
m[0, s] = s + 1
    # we may get lucky and terminate early (see the early return below)
for j in range(1, n):
for i in range(1, t):
for s in S:
if i < s:
continue
if m[j-1, i-s]:
m[j, i] = s
if i == (t-1):
#reached the target we are done
return (j, m)
break
return (j, m)
def get_shortest(i, m):
_, t = m.shape
res = []
while i >= 0:
e = m[i, t-1]
res.append(e)
t = t - e
if t <= 0:
break
i -= 1
return res
if __name__ == "__main__":
t = 12
    S = [3, 4, 4, 4, 3, 12, 45]
i, m = is_sum(t, S)
print(i, m)
print(get_shortest(i, m))
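    # Illustrative sanity check: brute-force all subset sums with itertools to
    # cross-check that t is indeed reachable from S (slow, but fine for tiny S).
    from itertools import combinations
    reachable = any(sum(c) == t
                    for r in range(1, len(S) + 1)
                    for c in combinations(S, r))
    print("brute force says t is reachable:", reachable)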
|
gpl-3.0
| 438,430,288,067,461,700
| 22.666667
| 70
| 0.414978
| false
| 3.059299
| false
| false
| false
|
thomas-hori/Repuge-NG
|
ludicrous/ScrollingInterface.py
|
1
|
2538
|
from ludicrous.SimpleInterface import SimpleInterface
__copying__ = """
Written by Thomas Hori
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/."""
class ScrollingInterface(SimpleInterface):
"""A subclass of SimpleInterface adding simple panning."""
def get_offsets(self):
"""Used for LOS optimisation and get_viewport_grids."""
x = y = 0
if self.playerobj.pt:
x, y = self.playerobj.pt
width, height = self.display.get_dimensions()
if width < 0:
width = 80
if height < 0:
height = 23
width -= 1
height -= 4
offsetx = x-(width//2)
roffsetx = offsetx+width
offsety = y-(height//2)
roffsety = offsety+height
return width, height, offsetx, offsety, roffsetx, roffsety
def get_viewport_grids(self):
if not self.level:
return SimpleInterface.get_viewport_grids(self)
width, height, offsetx, offsety, roffsetx, roffsety = self.get_offsets()
levwidth = len(self.level.grid)
levheight = len(self.level.grid[0])
colno = offsetx
coords = []
grid_subset = []
objgrid_subset = []
for unused in range(width):
if (colno >= 0) and (colno < levwidth):
gcol = self.level.grid[colno]
ocol = self.level.objgrid[colno]
c_sub = []
g_sub = []
o_sub = []
rowno = offsety
for unused2 in range(height):
c_sub.append((colno, rowno))
if (colno < 0) or (colno >= levwidth) or (rowno < 0) or (rowno >= levheight):
g_sub.append(("space", None))
o_sub.append([])
else:
g_sub.append(gcol[rowno])
o_sub.append(ocol[rowno])
rowno += 1
coords.append(c_sub)
grid_subset.append(g_sub)
objgrid_subset.append(o_sub)
colno += 1
return coords, grid_subset, objgrid_subset
def get_viewport_pt(self):
width, height = self.display.get_dimensions()
if width < 0:
width = 80
if height < 0:
height = 23
width -= 1
height -= 4
return (width)//2, (height)//2
|
mpl-2.0
| 6,258,991,873,130,474,000
| 34.782609
| 93
| 0.520489
| false
| 3.810811
| false
| false
| false
|
felixsch/mkcrowbar
|
tests/test_network.py
|
1
|
5238
|
from pytest import raises
from mkcrowbar import network
from fake import *
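# LocalCommands/LocalCommand, StubOpen and the expect_args/return_ok/return_error/
# load_fixture helpers come from the local `fake` test module (wildcard-imported
# above); they appear to stub out shell commands and file access so these tests
# run without touching real network interfaces.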
def test_iface_has_ipv4_addr(capsys, monkeypatch):
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
args = ['-f', 'inet', 'addr', 'show', 'eth0']
local.has('ip', expect_args(args, load_fixture('ip_addr_show')))
ip = network.iface_has_ipv4_addr('eth0')
assert ip == '11.22.33.44'
def test_iface_backup_configuration(monkeypatch):
iface = 'eth0'
home_path = '/root/.mkcrowbar'
iface_path = '/etc/sysconfig/network/ifcfg-' + iface
monkeypatch.setattr('os.path.exists', LocalCommand('path.exists', expect_args([home_path], True)))
monkeypatch.setattr('os.makedirs', LocalCommand('makedirs', expect_args([home_path])))
monkeypatch.setattr('os.path.isfile', LocalCommand('path.isfile', expect_args([iface_path])))
monkeypatch.setattr('os.rename', LocalCommand('rename', expect_args([iface_path])))
network.iface_backup_configuration(iface)
def test_set_static_addr(monkeypatch):
iface = 'eth0'
path = '/etc/sysconfig/network/ifcfg-' + iface
stub_file = StubOpen()
monkeypatch.setattr('mkcrowbar.network.iface_backup_configuration',
LocalCommand('backup', expect_args([iface])))
monkeypatch.setattr('builtins.open', LocalCommand('open', expect_args([path], lambda *args: stub_file)))
network.iface_set_static_addr(iface, {'FOO': 'bar'})
assert 'DEVICE=eth0\n' in stub_file.result()
assert 'BOOTPROTO=static\n' in stub_file.result()
assert 'FOO=bar\n' in stub_file.result()
def test_start_stop(monkeypatch):
iface = 'eth1'
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
local.has('ifdown', expect_args([iface], return_ok()))
assert network.iface_stop(iface) is True
local.has('ifdown', expect_args([iface], return_error()))
assert network.iface_stop(iface) is False
local.has('ifup', expect_args([iface], return_ok()))
assert network.iface_start(iface) is True
local.has('ifup', expect_args([iface], return_error()))
assert network.iface_start(iface) is False
def test_uses_dhcp(monkeypatch):
iface = 'eth1'
path = '/etc/sysconfig/network/ifcfg-' + iface
dhcp = StubOpen(monkeypatch, expect_args([path], load_fixture('ifcfg-dhcp')))
static = StubOpen(monkeypatch, expect_args([path], load_fixture('ifcfg-static')))
monkeypatch.setattr('builtins.open', lambda *args: dhcp(args))
assert network.iface_uses_dhcp(iface) is True
monkeypatch.setattr('builtins.open', lambda *args: static(args))
assert network.iface_uses_dhcp(iface) is False
def test_hostname(capsys, monkeypatch):
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
local.has('hostname', expect_args(['-f'], return_ok(' test.testme.com ')))
assert network.hostname('-f') == 'test.testme.com'
local.has('hostname', expect_args(['-f'], return_error('hostname: Name or service not known')))
with raises(SystemExit):
network.hostname('-f')
_, err = capsys.readouterr()
assert err == 'hostname: Name or service not known'
def test_set_hostname(monkeypatch):
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
local.has('hostname', expect_args(['newhostname'], return_ok()))
assert network.set_hostname('newhostname') is True
def test_is_qualified_hostname(monkeypatch):
assert network.is_qualified_hostname('host') is False
assert network.is_qualified_hostname('moep@floep.com') is False
assert network.is_qualified_hostname('local.suse.com') is True
assert network.is_qualified_hostname('superlocal.local.suse.com') is True
def test_add_to_hosts(monkeypatch):
fqdn = 'example.test.com'
ip = '192.168.2.111'
path = '/etc/hosts'
clean_hosts = StubOpen(monkeypatch, expect_args([path], load_fixture('hosts')))
added_hosts = StubOpen(monkeypatch, expect_args([path], load_fixture('hosts-already-added')))
monkeypatch.setattr('builtins.open', lambda *args: clean_hosts(args))
    assert network.add_to_hosts(ip, fqdn) == 0
assert '192.168.2.111 example.test.com example\n' in clean_hosts.result()
monkeypatch.setattr('builtins.open', lambda *args: added_hosts(args))
    assert network.add_to_hosts(ip, fqdn) == -1
def test_has_running_firewall(monkeypatch):
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
local.has('iptables', expect_args(['-S'], "-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT"))
assert network.has_running_firewall() is False
local.has('iptables', expect_args(['-S'], load_fixture('used_iptables')))
assert network.has_running_firewall() is True
def test_is_domain_name_reachable(monkeypatch):
local = LocalCommands()
monkeypatch.setattr('mkcrowbar.network.local', local)
local.has('ping', expect_args(['-c','1', 'fooo.net'], return_ok()))
assert network.is_domain_name_reachable('fooo.net') is True
local.has('ping', expect_args(['-c','1', 'fooooooo.net'], return_error()))
assert network.is_domain_name_reachable('fooooooo.net') is False
|
apache-2.0
| -4,987,254,012,026,377,000
| 34.391892
| 109
| 0.685567
| false
| 3.501337
| true
| false
| false
|
xhochy/g-octave
|
g_octave/description_tree.py
|
1
|
3391
|
# -*- coding: utf-8 -*-
"""
g_octave.description_tree
~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a Python object with the content of a directory
tree with DESCRIPTION files. The object contains *g_octave.Description*
objects for each DESCRIPTION file.
:copyright: (c) 2009-2010 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ['DescriptionTree']
import glob
import os
import re
from .config import Config
from .description import Description
from .log import Log
from portage.versions import vercmp
log = Log('g_octave.description_tree')
config = Config()
# from http://wiki.python.org/moin/HowTo/Sorting/
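# (Equivalent to functools.cmp_to_key; presumably inlined here for compatibility
#  with older Python versions. It is used below to sort version strings with
#  portage's vercmp.)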
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
class DescriptionTree(list):
def __init__(self, parse_sysreq=True):
log.info('Parsing the package database.')
list.__init__(self)
self._categories = [i.strip() for i in config.categories.split(',')]
for my_file in glob.glob(os.path.join(config.db, 'octave-forge', \
'**', '**', '*.DESCRIPTION')):
description = Description(my_file, parse_sysreq=parse_sysreq)
if description.CAT in self._categories:
self.append(description)
def package_versions(self, pn):
tmp = []
for pkg in self:
if pkg.PN == pn:
tmp.append(pkg.PV)
tmp.sort(key=cmp_to_key(vercmp))
return tmp
def latest_version(self, pn):
tmp = self.package_versions(pn)
return (len(tmp) > 0) and tmp[-1] or None
def latest_version_from_list(self, pv_list):
tmp = pv_list[:]
tmp.sort(key=cmp_to_key(vercmp))
return (len(tmp) > 0) and tmp[-1] or None
def search(self, term):
# term can be a regular expression
re_term = re.compile(r'%s' % term)
packages = {}
for pkg in self:
if re_term.search(pkg.PN) is not None:
if pkg.PN not in packages:
packages[pkg.PN] = []
packages[pkg.PN].append(pkg.PV)
packages[pkg.PN].sort(key=cmp_to_key(vercmp))
return packages
def list(self):
packages = {}
for category in self._categories:
packages[category] = {}
for pkg in self:
if pkg.PN not in packages[pkg.CAT]:
packages[pkg.CAT][pkg.PN] = []
packages[pkg.CAT][pkg.PN].append(pkg.PV)
packages[pkg.CAT][pkg.PN].sort(key=cmp_to_key(vercmp))
return packages
def get(self, p):
for pkg in self:
if pkg.P == p:
return pkg
return None
|
gpl-2.0
| -2,548,059,103,229,188,600
| 29.54955
| 76
| 0.555883
| false
| 3.706011
| true
| false
| false
|
alphagov/notifications-api
|
tests/app/db.py
|
1
|
42588
|
import random
import uuid
from datetime import date, datetime, timedelta
import pytest
from app import db
from app.dao import fact_processing_time_dao
from app.dao.email_branding_dao import dao_create_email_branding
from app.dao.inbound_sms_dao import dao_create_inbound_sms
from app.dao.invited_org_user_dao import save_invited_org_user
from app.dao.invited_user_dao import save_invited_user
from app.dao.jobs_dao import dao_create_job
from app.dao.notifications_dao import dao_create_notification
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_create_organisation,
)
from app.dao.permissions_dao import permission_dao
from app.dao.service_callback_api_dao import save_service_callback_api
from app.dao.service_data_retention_dao import insert_service_data_retention
from app.dao.service_inbound_api_dao import save_service_inbound_api
from app.dao.service_permissions_dao import dao_add_service_permission
from app.dao.service_sms_sender_dao import (
dao_update_service_sms_sender,
update_existing_sms_sender_with_inbound_number,
)
from app.dao.services_dao import dao_add_user_to_service, dao_create_service
from app.dao.templates_dao import dao_create_template, dao_update_template
from app.dao.users_dao import save_model_user
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
MOBILE_TYPE,
SMS_TYPE,
AnnualBilling,
ApiKey,
BroadcastEvent,
BroadcastMessage,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber,
BroadcastStatusType,
Complaint,
DailySortedLetter,
Domain,
EmailBranding,
FactBilling,
FactNotificationStatus,
FactProcessingTime,
InboundNumber,
InboundSms,
InvitedOrganisationUser,
InvitedUser,
Job,
LetterBranding,
LetterRate,
Notification,
NotificationHistory,
Organisation,
Permission,
Rate,
ReturnedLetter,
Service,
ServiceCallbackApi,
ServiceContactList,
ServiceEmailReplyTo,
ServiceGuestList,
ServiceInboundApi,
ServiceLetterContact,
ServicePermission,
ServiceSmsSender,
Template,
TemplateFolder,
User,
WebauthnCredential,
)
def create_user(
*,
mobile_number="+447700900986",
email="notify@digital.cabinet-office.gov.uk",
state='active',
id_=None,
name="Test User"
):
data = {
'id': id_ or uuid.uuid4(),
'name': name,
'email_address': email,
'password': 'password',
'mobile_number': mobile_number,
'state': state
}
user = User.query.filter_by(email_address=email).first()
if not user:
user = User(**data)
save_model_user(user, validated_email_access=True)
return user
def create_permissions(user, service, *permissions):
permissions = [
Permission(service_id=service.id, user_id=user.id, permission=p)
for p in permissions
]
permission_dao.set_user_service_permission(user, service, permissions, _commit=True)
def create_service(
user=None,
service_name="Sample service",
service_id=None,
restricted=False,
count_as_live=True,
service_permissions=None,
research_mode=False,
active=True,
email_from=None,
prefix_sms=True,
message_limit=1000,
organisation_type='central',
check_if_service_exists=False,
go_live_user=None,
go_live_at=None,
crown=True,
organisation=None,
purchase_order_number=None,
billing_contact_names=None,
billing_contact_email_addresses=None,
billing_reference=None,
):
if check_if_service_exists:
service = Service.query.filter_by(name=service_name).first()
if (not check_if_service_exists) or (check_if_service_exists and not service):
service = Service(
name=service_name,
message_limit=message_limit,
restricted=restricted,
email_from=email_from if email_from else service_name.lower().replace(' ', '.'),
created_by=user if user else create_user(email='{}@digital.cabinet-office.gov.uk'.format(uuid.uuid4())),
prefix_sms=prefix_sms,
organisation_type=organisation_type,
organisation=organisation,
go_live_user=go_live_user,
go_live_at=go_live_at,
crown=crown,
purchase_order_number=purchase_order_number,
billing_contact_names=billing_contact_names,
billing_contact_email_addresses=billing_contact_email_addresses,
billing_reference=billing_reference,
)
dao_create_service(
service,
service.created_by,
service_id,
service_permissions=service_permissions,
)
service.active = active
service.research_mode = research_mode
service.count_as_live = count_as_live
else:
if user and user not in service.users:
dao_add_user_to_service(service, user)
return service
def create_service_with_inbound_number(
inbound_number='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
inbound = create_inbound_number(number=inbound_number, service_id=service.id)
update_existing_sms_sender_with_inbound_number(service_sms_sender=sms_sender,
sms_sender=inbound_number,
inbound_number_id=inbound.id)
return service
def create_service_with_defined_sms_sender(
sms_sender_value='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
dao_update_service_sms_sender(service_id=service.id,
service_sms_sender_id=sms_sender.id,
is_default=True,
sms_sender=sms_sender_value)
return service
def create_template(
service,
template_type=SMS_TYPE,
template_name=None,
subject='Template subject',
content='Dear Sir/Madam, Hello. Yours Truly, The Government.',
reply_to=None,
hidden=False,
archived=False,
folder=None,
postage=None,
process_type='normal',
contact_block_id=None
):
data = {
'name': template_name or '{} Template Name'.format(template_type),
'template_type': template_type,
'content': content,
'service': service,
'created_by': service.created_by,
'reply_to': reply_to,
'hidden': hidden,
'folder': folder,
'process_type': process_type,
}
if template_type == LETTER_TYPE:
data["postage"] = postage or "second"
if contact_block_id:
data['service_letter_contact_id'] = contact_block_id
if template_type != SMS_TYPE:
data['subject'] = subject
template = Template(**data)
dao_create_template(template)
if archived:
template.archived = archived
dao_update_template(template)
return template
def create_notification(
template=None,
job=None,
job_row_number=None,
to_field=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
personalisation=None,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
scheduled_for=None,
normalised_to=None,
one_off=False,
reply_to_text=None,
created_by_id=None,
postage=None,
document_download_count=None,
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if to_field is None:
to_field = '+447700900855' if template.template_type == SMS_TYPE else 'test@example.com'
if status not in ('created', 'validation-failed', 'virus-scan-failed', 'pending-virus-check'):
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if not one_off and (job is None and api_key is None):
# we did not specify in test - lets create it
api_key = ApiKey.query.filter(ApiKey.service == template.service, ApiKey.key_type == key_type).first()
if not api_key:
api_key = create_api_key(template.service, key_type=key_type)
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': uuid.uuid4(),
'to': to_field,
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'personalisation': personalisation,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'normalised_to': normalised_to,
'reply_to_text': reply_to_text,
'created_by_id': created_by_id,
'postage': postage,
'document_download_count': document_download_count,
}
notification = Notification(**data)
dao_create_notification(notification)
return notification
def create_notification_history(
template=None,
job=None,
job_row_number=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
created_by_id=None,
postage=None,
id=None
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if status != 'created':
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': id or uuid.uuid4(),
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'created_by_id': created_by_id,
'postage': postage
}
notification_history = NotificationHistory(**data)
db.session.add(notification_history)
db.session.commit()
return notification_history
def create_job(
template,
notification_count=1,
created_at=None,
job_status='pending',
scheduled_for=None,
processing_started=None,
processing_finished=None,
original_file_name='some.csv',
archived=False,
contact_list_id=None,
):
data = {
'id': uuid.uuid4(),
'service_id': template.service_id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'original_file_name': original_file_name,
'notification_count': notification_count,
'created_at': created_at or datetime.utcnow(),
'created_by': template.created_by,
'job_status': job_status,
'scheduled_for': scheduled_for,
'processing_started': processing_started,
'processing_finished': processing_finished,
'archived': archived,
'contact_list_id': contact_list_id,
}
job = Job(**data)
dao_create_job(job)
return job
def create_service_permission(service_id, permission=EMAIL_TYPE):
dao_add_service_permission(
service_id if service_id else create_service().id, permission)
service_permissions = ServicePermission.query.all()
return service_permissions
def create_inbound_sms(
service,
notify_number=None,
user_number='447700900111',
provider_date=None,
provider_reference=None,
content='Hello',
provider="mmg",
created_at=None
):
if not service.inbound_number:
create_inbound_number(
# create random inbound number
notify_number or '07{:09}'.format(random.randint(0, 1e9 - 1)),
provider=provider,
service_id=service.id
)
inbound = InboundSms(
service=service,
created_at=created_at or datetime.utcnow(),
notify_number=service.get_inbound_number(),
user_number=user_number,
provider_date=provider_date or datetime.utcnow(),
provider_reference=provider_reference or 'foo',
content=content,
provider=provider
)
dao_create_inbound_sms(inbound)
return inbound
def create_service_inbound_api(
service,
url="https://something.com",
bearer_token="some_super_secret",
):
service_inbound_api = ServiceInboundApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id
)
save_service_inbound_api(service_inbound_api)
return service_inbound_api
def create_service_callback_api(
service,
url="https://something.com",
bearer_token="some_super_secret",
callback_type="delivery_status"
):
service_callback_api = ServiceCallbackApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id,
callback_type=callback_type
)
save_service_callback_api(service_callback_api)
return service_callback_api
def create_email_branding(colour='blue', logo='test_x2.png', name='test_org_1', text='DisplayName'):
data = {
'colour': colour,
'logo': logo,
'name': name,
'text': text,
}
email_branding = EmailBranding(**data)
dao_create_email_branding(email_branding)
return email_branding
def create_rate(start_date, value, notification_type):
rate = Rate(
id=uuid.uuid4(),
valid_from=start_date,
rate=value,
notification_type=notification_type
)
db.session.add(rate)
db.session.commit()
return rate
def create_letter_rate(start_date=None, end_date=None, crown=True, sheet_count=1, rate=0.33, post_class='second'):
if start_date is None:
start_date = datetime(2016, 1, 1)
rate = LetterRate(
id=uuid.uuid4(),
start_date=start_date,
end_date=end_date,
crown=crown,
sheet_count=sheet_count,
rate=rate,
post_class=post_class
)
db.session.add(rate)
db.session.commit()
return rate
def create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name=None):
id_ = uuid.uuid4()
name = key_name if key_name else '{} api key {}'.format(key_type, id_)
api_key = ApiKey(
service=service,
name=name,
created_by=service.created_by,
key_type=key_type,
id=id_,
secret=uuid.uuid4()
)
db.session.add(api_key)
db.session.commit()
return api_key
def create_inbound_number(number, provider='mmg', active=True, service_id=None):
inbound_number = InboundNumber(
id=uuid.uuid4(),
number=number,
provider=provider,
active=active,
service_id=service_id
)
db.session.add(inbound_number)
db.session.commit()
return inbound_number
def create_reply_to_email(
service,
email_address,
is_default=True,
archived=False
):
data = {
'service': service,
'email_address': email_address,
'is_default': is_default,
'archived': archived,
}
reply_to = ServiceEmailReplyTo(**data)
db.session.add(reply_to)
db.session.commit()
return reply_to
def create_service_sms_sender(
service,
sms_sender,
is_default=True,
inbound_number_id=None,
archived=False
):
data = {
'service_id': service.id,
'sms_sender': sms_sender,
'is_default': is_default,
'inbound_number_id': inbound_number_id,
'archived': archived,
}
service_sms_sender = ServiceSmsSender(**data)
db.session.add(service_sms_sender)
db.session.commit()
return service_sms_sender
def create_letter_contact(
service,
contact_block,
is_default=True,
archived=False
):
data = {
'service': service,
'contact_block': contact_block,
'is_default': is_default,
'archived': archived,
}
letter_content = ServiceLetterContact(**data)
db.session.add(letter_content)
db.session.commit()
return letter_content
def create_annual_billing(
service_id, free_sms_fragment_limit, financial_year_start
):
annual_billing = AnnualBilling(
service_id=service_id,
free_sms_fragment_limit=free_sms_fragment_limit,
financial_year_start=financial_year_start
)
db.session.add(annual_billing)
db.session.commit()
return annual_billing
def create_domain(domain, organisation_id):
domain = Domain(domain=domain, organisation_id=organisation_id)
db.session.add(domain)
db.session.commit()
return domain
def create_organisation(
name='test_org_1',
active=True,
organisation_type=None,
domains=None,
organisation_id=None,
purchase_order_number=None,
billing_contact_names=None,
billing_contact_email_addresses=None,
billing_reference=None,
):
data = {
'id': organisation_id,
'name': name,
'active': active,
'organisation_type': organisation_type,
'purchase_order_number': purchase_order_number,
'billing_contact_names': billing_contact_names,
'billing_contact_email_addresses': billing_contact_email_addresses,
'billing_reference': billing_reference,
}
organisation = Organisation(**data)
dao_create_organisation(organisation)
for domain in domains or []:
create_domain(domain, organisation.id)
return organisation
def create_invited_org_user(organisation, invited_by, email_address='invite@example.com'):
invited_org_user = InvitedOrganisationUser(
email_address=email_address,
invited_by=invited_by,
organisation=organisation,
)
save_invited_org_user(invited_org_user)
return invited_org_user
def create_daily_sorted_letter(billing_day=None,
file_name="Notify-20180118123.rs.txt",
unsorted_count=0,
sorted_count=0):
daily_sorted_letter = DailySortedLetter(
billing_day=billing_day or date(2018, 1, 18),
file_name=file_name,
unsorted_count=unsorted_count,
sorted_count=sorted_count
)
db.session.add(daily_sorted_letter)
db.session.commit()
return daily_sorted_letter
def create_ft_billing(bst_date,
template,
*,
provider='test',
rate_multiplier=1,
international=False,
rate=0,
billable_unit=1,
notifications_sent=1,
postage='none'
):
data = FactBilling(bst_date=bst_date,
service_id=template.service_id,
template_id=template.id,
notification_type=template.template_type,
provider=provider,
rate_multiplier=rate_multiplier,
international=international,
rate=rate,
billable_units=billable_unit,
notifications_sent=notifications_sent,
postage=postage)
db.session.add(data)
db.session.commit()
return data
def create_ft_notification_status(
bst_date,
notification_type='sms',
service=None,
template=None,
job=None,
key_type='normal',
notification_status='delivered',
count=1
):
if job:
template = job.template
if template:
service = template.service
notification_type = template.template_type
else:
if not service:
service = create_service()
template = create_template(service=service, template_type=notification_type)
data = FactNotificationStatus(
bst_date=bst_date,
template_id=template.id,
service_id=service.id,
job_id=job.id if job else uuid.UUID(int=0),
notification_type=notification_type,
key_type=key_type,
notification_status=notification_status,
notification_count=count
)
db.session.add(data)
db.session.commit()
return data
def create_process_time(bst_date='2021-03-01', messages_total=35, messages_within_10_secs=34):
data = FactProcessingTime(
bst_date=bst_date,
messages_total=messages_total,
messages_within_10_secs=messages_within_10_secs
)
fact_processing_time_dao.insert_update_processing_time(data)
def create_service_guest_list(service, email_address=None, mobile_number=None):
if email_address:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, email_address)
elif mobile_number:
guest_list_user = ServiceGuestList.from_string(service.id, MOBILE_TYPE, mobile_number)
else:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, 'guest_list_user@digital.gov.uk')
db.session.add(guest_list_user)
db.session.commit()
return guest_list_user
def create_complaint(service=None,
notification=None,
created_at=None):
if not service:
service = create_service()
if not notification:
template = create_template(service=service, template_type='email')
notification = create_notification(template=template)
complaint = Complaint(notification_id=notification.id,
service_id=service.id,
ses_feedback_id=str(uuid.uuid4()),
complaint_type='abuse',
complaint_date=datetime.utcnow(),
created_at=created_at if created_at else datetime.now()
)
db.session.add(complaint)
db.session.commit()
return complaint
def ses_complaint_callback_malformed_message_id():
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"recipient1@example.com"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","badMessageId":"ref1","destination":["recipient1@example.com"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback_with_missing_complaint_type():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"recipient1@example.com"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["recipient1@example.com"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complaintFeedbackType": "abuse", "complainedRecipients":[{"emailAddress":"recipient1@example.com"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"52.208.24.161","sendingAccountId":"888450439860","messageId":"ref1","destination":["recipient1@example.com"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_notification_callback():
return '{\n "Type" : "Notification",\n "MessageId" : "ref1",' \
'\n "TopicArn" : "arn:aws:sns:eu-west-1:123456789012:testing",' \
'\n "Message" : "{\\"notificationType\\":\\"Delivery\\",' \
'\\"mail\\":{\\"timestamp\\":\\"2016-03-14T12:35:25.909Z\\",' \
'\\"source\\":\\"test@test-domain.com\\",' \
'\\"sourceArn\\":\\"arn:aws:ses:eu-west-1:123456789012:identity/testing-notify\\",' \
'\\"sendingAccountId\\":\\"123456789012\\",' \
'\\"messageId\\":\\"ref1\\",' \
'\\"destination\\":[\\"testing@digital.cabinet-office.gov.uk\\"]},' \
'\\"delivery\\":{\\"timestamp\\":\\"2016-03-14T12:35:26.567Z\\",' \
'\\"processingTimeMillis\\":658,' \
'\\"recipients\\":[\\"testing@digital.cabinet-office.gov.uk\\"],' \
'\\"smtpResponse\\":\\"250 2.0.0 OK 1457958926 uo5si26480932wjc.221 - gsmtp\\",' \
'\\"reportingMTA\\":\\"a6-238.smtp-out.eu-west-1.amazonses.com\\"}}",' \
'\n "Timestamp" : "2016-03-14T12:35:26.665Z",\n "SignatureVersion" : "1",' \
'\n "Signature" : "X8d7eTAOZ6wlnrdVVPYanrAlsX0SMPfOzhoTEBnQqYkrNWTqQY91C0f3bxtPdUhUt' \
'OowyPAOkTQ4KnZuzphfhVb2p1MyVYMxNKcBFB05/qaCX99+92fjw4x9LeUOwyGwMv5F0Vkfi5qZCcEw69uVrhYL' \
'VSTFTrzi/yCtru+yFULMQ6UhbY09GwiP6hjxZMVr8aROQy5lLHglqQzOuSZ4KeD85JjifHdKzlx8jjQ+uj+FLzHXPMA' \
'PmPU1JK9kpoHZ1oPshAFgPDpphJe+HwcJ8ezmk+3AEUr3wWli3xF+49y8Z2anASSVp6YI2YP95UT8Rlh3qT3T+V9V8rbSVislxA==",' \
'\n "SigningCertURL" : "https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-bb750' \
'dd426d95ee9390147a5624348ee.pem",' \
'\n "UnsubscribeURL" : "https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&S' \
'subscriptionArn=arn:aws:sns:eu-west-1:302763885840:preview-emails:d6aad3ef-83d6-4cf3-a470-54e2e75916da"\n}'
def create_service_data_retention(
service,
notification_type='sms',
days_of_retention=3
):
data_retention = insert_service_data_retention(
service_id=service.id,
notification_type=notification_type,
days_of_retention=days_of_retention
)
return data_retention
def create_invited_user(service=None,
to_email_address=None):
if service is None:
service = create_service()
if to_email_address is None:
to_email_address = 'invited_user@digital.gov.uk'
from_user = service.users[0]
data = {
'service': service,
'email_address': to_email_address,
'from_user': from_user,
'permissions': 'send_messages,manage_service,manage_api_keys',
'folder_permissions': [str(uuid.uuid4()), str(uuid.uuid4())]
}
invited_user = InvitedUser(**data)
save_invited_user(invited_user)
return invited_user
def create_template_folder(service, name='foo', parent=None):
tf = TemplateFolder(name=name, service=service, parent=parent)
db.session.add(tf)
db.session.commit()
return tf
def create_letter_branding(name='HM Government', filename='hm-government'):
test_domain_branding = LetterBranding(name=name,
filename=filename,
)
db.session.add(test_domain_branding)
db.session.commit()
return test_domain_branding
def set_up_usage_data(start_date):
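    # Summary: builds six services (three of them attached to organisations) and
    # seeds FactBilling rows around `start_date` for SMS, email and letter
    # templates, then returns the created services and organisations in a dict.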
year = int(start_date.strftime('%Y'))
one_week_earlier = start_date - timedelta(days=7)
two_days_later = start_date + timedelta(days=2)
one_week_later = start_date + timedelta(days=7)
one_month_later = start_date + timedelta(days=31)
# service with sms and letters:
service_1_sms_and_letter = create_service(
service_name='a - with sms and letter',
purchase_order_number="service purchase order number",
billing_contact_names="service billing contact names",
billing_contact_email_addresses="service@billing.contact email@addresses.gov.uk",
billing_reference="service billing reference"
)
letter_template_1 = create_template(service=service_1_sms_and_letter, template_type='letter')
sms_template_1 = create_template(service=service_1_sms_and_letter, template_type='sms')
create_annual_billing(
service_id=service_1_sms_and_letter.id, free_sms_fragment_limit=10, financial_year_start=year
)
org_1 = create_organisation(
name="Org for {}".format(service_1_sms_and_letter.name),
purchase_order_number="org1 purchase order number",
billing_contact_names="org1 billing contact names",
billing_contact_email_addresses="org1@billing.contact email@addresses.gov.uk",
billing_reference="org1 billing reference"
)
dao_add_service_to_organisation(
service=service_1_sms_and_letter,
organisation_id=org_1.id
)
create_ft_billing(bst_date=one_week_earlier, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=two_days_later, template=sms_template_1, billable_unit=1, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=1, rate=.35, postage='first')
create_ft_billing(bst_date=one_month_later, template=letter_template_1,
notifications_sent=4, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=2, rate=.45, postage='second')
# service with emails only:
service_with_emails = create_service(service_name='b - emails')
email_template = create_template(service=service_with_emails, template_type='email')
org_2 = create_organisation(
name='Org for {}'.format(service_with_emails.name),
)
dao_add_service_to_organisation(service=service_with_emails, organisation_id=org_2.id)
create_ft_billing(bst_date=start_date, template=email_template, notifications_sent=10)
# service with letters:
service_with_letters = create_service(service_name='c - letters only')
letter_template_3 = create_template(service=service_with_letters, template_type='letter')
org_for_service_with_letters = create_organisation(
name="Org for {}".format(service_with_letters.name),
purchase_order_number="org3 purchase order number",
billing_contact_names="org3 billing contact names",
billing_contact_email_addresses="org3@billing.contact email@addresses.gov.uk",
billing_reference="org3 billing reference"
)
dao_add_service_to_organisation(service=service_with_letters, organisation_id=org_for_service_with_letters.id)
create_ft_billing(bst_date=start_date, template=letter_template_3,
notifications_sent=2, billable_unit=3, rate=.50, postage='first')
create_ft_billing(bst_date=one_week_later, template=letter_template_3,
notifications_sent=8, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=one_month_later, template=letter_template_3,
notifications_sent=12, billable_unit=5, rate=.65, postage='second')
# service with letters, without an organisation:
service_with_letters_without_org = create_service(service_name='d - service without org')
letter_template_4 = create_template(service=service_with_letters_without_org, template_type='letter')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=7, billable_unit=4, rate=1.55, postage='rest-of-world')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=8, billable_unit=4, rate=1.55, postage='europe')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=2, billable_unit=1, rate=.35, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=1, billable_unit=1, rate=.50, postage='first')
# service with chargeable SMS, without an organisation
service_with_sms_without_org = create_service(
service_name='b - chargeable sms',
purchase_order_number="sms purchase order number",
billing_contact_names="sms billing contact names",
billing_contact_email_addresses="sms@billing.contact email@addresses.gov.uk",
billing_reference="sms billing reference"
)
sms_template = create_template(service=service_with_sms_without_org, template_type='sms')
create_annual_billing(
service_id=service_with_sms_without_org.id, free_sms_fragment_limit=10, financial_year_start=year
)
create_ft_billing(bst_date=one_week_earlier, template=sms_template, rate=0.11, billable_unit=12)
create_ft_billing(bst_date=two_days_later, template=sms_template, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=sms_template, billable_unit=2, rate=0.11)
# service with SMS within free allowance
service_with_sms_within_allowance = create_service(
service_name='e - sms within allowance'
)
sms_template_2 = create_template(service=service_with_sms_within_allowance, template_type='sms')
create_annual_billing(
service_id=service_with_sms_within_allowance.id, free_sms_fragment_limit=10, financial_year_start=year
)
create_ft_billing(bst_date=one_week_later, template=sms_template_2, billable_unit=2, rate=0.11)
# dictionary with services and orgs to return
return {
"org_1": org_1,
"service_1_sms_and_letter": service_1_sms_and_letter,
"org_2": org_2,
"service_with_emails": service_with_emails,
"org_for_service_with_letters": org_for_service_with_letters,
"service_with_letters": service_with_letters,
"service_with_letters_without_org": service_with_letters_without_org,
"service_with_sms_without_org": service_with_sms_without_org,
"service_with_sms_within_allowance": service_with_sms_within_allowance,
}
def create_returned_letter(service=None, reported_at=None, notification_id=None):
if not service:
service = create_service(service_name='a - with sms and letter')
returned_letter = ReturnedLetter(
service_id=service.id,
reported_at=reported_at or datetime.utcnow(),
notification_id=notification_id or uuid.uuid4(),
created_at=datetime.utcnow(),
)
db.session.add(returned_letter)
db.session.commit()
return returned_letter
def create_service_contact_list(
service=None,
original_file_name='EmergencyContactList.xls',
row_count=100,
template_type='email',
created_by_id=None,
archived=False,
):
if not service:
service = create_service(service_name='service for contact list', user=create_user())
contact_list = ServiceContactList(
service_id=service.id,
original_file_name=original_file_name,
row_count=row_count,
template_type=template_type,
created_by_id=created_by_id or service.users[0].id,
created_at=datetime.utcnow(),
archived=archived,
)
db.session.add(contact_list)
db.session.commit()
return contact_list
def create_broadcast_message(
template=None,
*,
service=None, # only used if template is not provided
created_by=None,
personalisation=None,
content=None,
status=BroadcastStatusType.DRAFT,
starts_at=None,
finishes_at=None,
areas=None,
stubbed=False
):
if template:
service = template.service
template_id = template.id
template_version = template.version
personalisation = personalisation or {}
content = template._as_utils_template_with_personalisation(
personalisation
).content_with_placeholders_filled_in
elif content:
template_id = None
template_version = None
personalisation = None
content = content
else:
pytest.fail('Provide template or content')
broadcast_message = BroadcastMessage(
service_id=service.id,
template_id=template_id,
template_version=template_version,
personalisation=personalisation,
status=status,
starts_at=starts_at,
finishes_at=finishes_at,
created_by_id=created_by.id if created_by else service.created_by_id,
areas=areas or {'areas': [], 'simple_polygons': []},
content=content,
stubbed=stubbed
)
db.session.add(broadcast_message)
db.session.commit()
return broadcast_message
def create_broadcast_event(
broadcast_message,
sent_at=None,
message_type='alert',
transmitted_content=None,
transmitted_areas=None,
transmitted_sender=None,
transmitted_starts_at=None,
transmitted_finishes_at=None,
):
b_e = BroadcastEvent(
service=broadcast_message.service,
broadcast_message=broadcast_message,
sent_at=sent_at or datetime.utcnow(),
message_type=message_type,
transmitted_content=transmitted_content or {'body': 'this is an emergency broadcast message'},
transmitted_areas=transmitted_areas or broadcast_message.areas,
transmitted_sender=transmitted_sender or 'www.notifications.service.gov.uk',
transmitted_starts_at=transmitted_starts_at,
transmitted_finishes_at=transmitted_finishes_at or datetime.utcnow() + timedelta(hours=24),
)
db.session.add(b_e)
db.session.commit()
return b_e
def create_broadcast_provider_message(
broadcast_event,
provider,
status='sending'
):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=status,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message
def create_webauthn_credential(
user,
name='my key',
*,
credential_data='ABC123',
registration_response='DEF456',
):
webauthn_credential = WebauthnCredential(
user=user,
name=name,
credential_data=credential_data,
registration_response=registration_response
)
db.session.add(webauthn_credential)
db.session.commit()
return webauthn_credential
|
mit
| -4,070,065,704,934,377,500
| 33.794118
| 544
| 0.630483
| false
| 3.6
| false
| false
| false
|
stephen-hoover/Arignote
|
arignote/nnets/nets.py
|
1
|
40064
|
"""This module describes fully-functioning networks created from the pieces in `layer`.
"""
from __future__ import division, print_function
import collections
import inspect
import numpy as np
import six
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from ..data import files
from ..data import readers
from ..nnets import layers
from ..nnets import training
from ..util import misc
from ..util import netlog
log = netlog.setup_logging("nets", level="INFO")
def define_logistic_regression(n_classes, l1_reg=0, l2_reg=0):
"""Shortcut to build the list of layer definitions (a single layer,
in this case) for a logistic regression classifier.
Parameters
----------
n_classes : int
Number of classes to calculate probabilities for
l1_reg, l2_reg : float, optional
L1 and L2 regularization strengths
Returns
-------
list
Layer definitions suitable for input to a `NNClassifier`
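    Examples
    --------
    A minimal sketch of typical usage:

    >>> layer_defs = define_logistic_regression(n_classes=10, l2_reg=1e-4)
    >>> len(layer_defs)
    1
    >>> layer_defs[0][0]
    'ClassificationOutputLayer'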
"""
# This network is only an output layer.
layer_defs = [["ClassificationOutputLayer", {"n_classes": n_classes,
"l1": l1_reg, "l2": l2_reg}]]
return layer_defs
def define_cnn(n_classes, input_image_shape, n_kernels, filter_scale, poolsize,
n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0):
"""Shortcut to build the list of layer definitions for
    a convolutional neural network.
Defines a series of convolutional layers, followed by max-pooling layers,
after which a multi-layer perceptron calculates the probabilities of
membership in each class.
Parameters
----------
n_classes : int
Number of classes to calculate probabilities for
input_image_shape : list or tuple
Shape of input image, (n_channels, n_pixels_x, n_pixels_y)
n_kernels : list of ints
Number of convolutional kernels in each convolutional layer
filter_scale : list of ints
Size of (square) filters in each convolutional layer.
Must be the same length as `n_kernels`.
poolsize : list of ints
Size of (square) non-overlapping max-pooling kernel to be
applied after each convolutional layer (may be zero, meaning
no max pooling after that layer). Must be the same length
as `n_kernels`.
n_hidden : list of ints
Number of units in each hidden layer
dropout_p : float or list of floats
Dropout fraction for input and each hidden layer. If a single float,
this dropout fraction will be applied to every layer.
activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"}
Activation function to use for all layers
l1_reg, l2_reg : float, optional
L1 and L2 regularization strengths for all layers
Returns
-------
list
Layer definitions suitable for input to a `NNClassifier`
Examples
--------
>>> layers = define_cnn(10, (28, 28), n_kernels=[32, 32], filter_scale=[4, 3],
>>> poolsize=[0, 2], n_hidden=[400], dropout_p=0.2)
>>> print(layers)
[['InputImageLayer', {'n_images': 1, 'n_pixels': [28, 28], 'name': 'input'}],
['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-input'}],
['ConvLayer',
{'activation': 'relu',
'filter_shape': (4, 4),
'n_output_maps': 32,
'name': 'conv0'}],
['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-conv0'}],
['ConvLayer',
{'activation': 'relu',
'filter_shape': (3, 3),
'n_output_maps': 32,
'name': 'conv1'}],
['MaxPool2DLayer', {'name': 'maxpool1', 'pool_shape': (2, 2)}],
['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-conv1'}],
['FCLayer',
{'activation': 'relu', 'l1': 0, 'l2': 0, 'n_units': 400, 'name': 'fc0'}],
['DropoutLayer', {'dropout_p': 0.2, 'name': 'DO-fc0'}],
['ClassificationOutputLayer', {'l1': 0, 'l2': 0, 'n_classes': 10}]]
"""
# Assume input images are 2D. If the `input_image_shape` is 3 elements,
# the first element is the number of images in the input. Otherwise, assume
# that there's only one image in the input.
if len(input_image_shape) == 3:
pass
elif len(input_image_shape) == 2:
input_image_shape = [1] + list(input_image_shape)
else:
raise ValueError("The input image shape must be (n_channels, n_pixels_x, n_pixels_y).")
try:
# Make sure that `n_hidden` is a list.
len(n_hidden)
except TypeError:
n_hidden = [n_hidden]
try:
# Make sure that `dropout_p` is a list.
len(dropout_p)
except TypeError:
dropout_p = (1 + len(n_hidden) + len(n_kernels)) * [dropout_p]
if len(dropout_p) != len(n_kernels) + len(n_hidden) + 1:
raise ValueError("Either specify one dropout for all layers or one dropout for "
"each layer (inputs + hidden layers).")
dropout_p = dropout_p[::-1] # Pops come from the end, so reverse this list.
# Start by putting on the input layer.
layer_defs = [["InputImageLayer", {"name": "input", "n_images": input_image_shape[0],
"n_pixels": input_image_shape[1:]}]]
input_do = dropout_p.pop()
if input_do:
layer_defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}])
# Add convolutional layers.
for i_conv, (kernels, filter, pool) in enumerate(zip(n_kernels, filter_scale, poolsize)):
layer_defs.append(["ConvLayer", {"name": "conv{}".format(i_conv),
"n_output_maps": kernels,
"filter_shape": (filter, filter),
"activation": activation}])
if pool:
layer_defs.append(["MaxPool2DLayer", {"name": "maxpool{}".format(i_conv),
"pool_shape": (pool, pool)}])
layer_do = dropout_p.pop()
if layer_do:
layer_defs.append(["DropoutLayer", {"name": "DO-conv{}".format(i_conv),
"dropout_p": layer_do}])
# Add fully-connected layers.
for i_hidden, hidden in enumerate(n_hidden):
layer_defs.append(["FCLayer", {"name": "fc{}".format(i_hidden),
"n_units": hidden, "activation": activation,
"l1": l1_reg, "l2": l2_reg}])
layer_do = dropout_p.pop()
if layer_do:
layer_defs.append(["DropoutLayer", {"name": "DO-fc{}".format(i_hidden),
"dropout_p": layer_do}])
# Put on an output layer.
layer_defs.append(["ClassificationOutputLayer", {"n_classes": n_classes, "l1": l1_reg,
"l2": l2_reg}])
return layer_defs
def define_mlp(n_classes, n_hidden, dropout_p, activation="relu", l1_reg=0, l2_reg=0):
"""Shortcut to create a multi-layer perceptron classifier
Parameters
----------
n_classes : int
Number of classes to calculate probabilities for
n_hidden : list of ints
Number of units in each hidden layer
dropout_p : float or list of floats
Dropout fraction for input and each hidden layer. If a single float,
this dropout fraction will be applied to every layer.
activation : {"relu", "prelu", "sigmoid", "tanh", "abstanh", "linear"}
Activation function to use for all layers
l1_reg, l2_reg : float, optional
L1 and L2 regularization strengths for all layers
Returns
-------
list
Layer definitions suitable for input to a `NNClassifier`
Examples
--------
>>> layers = define_mlp(10, [400, 400], [0.4, 0.25, 0.25], "prelu", l2_reg=1e-4)
>>> print(layers)
[['DropoutLayer', {'dropout_p': 0.4, 'name': 'DO-input'}],
['FCLayer', {'activation': 'prelu', 'l1': 0, 'l2': 0.0001, 'n_units': 400, 'name': 'fc0'}],
['DropoutLayer', {'dropout_p': 0.25, 'name': 'DO-fc0'}],
['FCLayer', {'activation': 'prelu', 'l1': 0, 'l2': 0.0001, 'n_units': 400, 'name': 'fc1'}],
['DropoutLayer', {'dropout_p': 0.25, 'name': 'DO-fc1'}],
['ClassificationOutputLayer', {'l1': 0, 'l2': 0.0001, 'n_classes': 10, 'name': 'output'}]]
"""
try:
# Make sure that `n_hidden` is a list.
len(n_hidden)
except TypeError:
n_hidden = [n_hidden]
try:
# Make sure that `dropout_p` is a list.
len(dropout_p)
except TypeError:
dropout_p = (1 + len(n_hidden)) * [dropout_p]
if len(dropout_p) != len(n_hidden) + 1:
raise ValueError("Either specify one dropout for all layers or one dropout for "
"each layer (inputs + hidden layers).")
dropout_p = dropout_p[::-1] # Pops come from the end, so reverse this list.
# Start by putting on dropout for the input layer (if any).
layer_defs = []
input_do = dropout_p.pop()
if input_do:
layer_defs.append(["DropoutLayer", {"name": "DO-input", "dropout_p": input_do}])
# Add fully-connected layers.
for i_hidden, hidden in enumerate(n_hidden):
layer_defs.append(["FCLayer", {"name": "fc{}".format(i_hidden),
"n_units": hidden, "activation": activation,
"l1": l1_reg, "l2": l2_reg}])
layer_do = dropout_p.pop()
if layer_do:
layer_defs.append(["DropoutLayer", {"name": "DO-fc{}".format(i_hidden),
"dropout_p": layer_do}])
# Put on an output layer.
layer_defs.append(["ClassificationOutputLayer", {"name": "output", "n_classes": n_classes,
"l1": l1_reg, "l2": l2_reg}])
return layer_defs
class NNClassifier(object):
r"""A neural net to be used for a classification task.
The classification network is built from individual layers.
Compilation doesn't happen until necessary at training time.
This object can be pickled and unpickled; the entire state
of the object will be stored.
.. note:: After unpickling, the network will need to be compiled
(either through `fit` or by calling `compile` directly)
before it can be used.
Parameters
----------
layer_defs : list
Definition of the network layers. This should be a list of lists.
name : str, optional
Name of this neural network, for display purposes
n_in : int or tuple, optional
The shape of the input features. If supplied here, we'll initialize
the network layers now. Otherwise, this will be inferred from the
data supplied during a call to `fit` and the network layers will be
constructed at that time.
batch_size : int, optional
Batch size to be used for training. Only needed now if `n_in` is also
supplied -- it can be used to optimize convolutional layers on the CPU.
random_state : int or np.random.RandomState, optional
RNG or seed for a RNG. If not supplied, will be randomly initialized.
Other Parameters
----------------
stored_network : str, optional
Filename of pickled network. If supplied, initialize this object's
layers from weights stored in the `stored_network`. The pickled
network must have the same architecture as this network.
theano_rng : theano.tensor.shared_randomstreams.RandomStreams, optional
Symbolic random number generator. If not supplied, will be initialized
from the numpy RNG.
Attributes
----------
predict_proba : function
Input batch of examples, output probabilities of each class for each example.
Compiled by theano.
predict : function
Input batch of examples, output class with maximum probability for each example.
Compiled by theano.
layers_train : list
List of `Layer` objects. Potentially non-deterministic; used for training.
layers_inf : list
Network used for inference, deterministic. Identical architecture to and
shares parameters with `layers_train`.
params : list
All trainable parameters (theano shared variables) from this network
param_update_rules : list
All special update rules, one dictionary per parameter in `params`
n_params : int
Total number of individual trainable parameters
trainer : training.SupervisedTraining
Object used to train this network; present after calling `fit`
Examples
--------
>>> layers = [["FCLayer", {"name": "fc1", "n_units": 100, "activation": "relu", "l2": 0.001}],
["DropoutLayer", {"name": "DO-fc1", "dropout_p": 0.5}],
["ClassificationOutputLayer", {"name": "output", "n_classes": 10}]]
>>> cls = NNClassifier(layers, name="Small example net", random_state=42)
"""
def __init__(self, layer_defs, name="Neural Network Classifier", n_in=None,
batch_size=None, random_state=None, stored_network=None, theano_rng=None):
self.input = None
self.trainer = None
self.n_in = n_in
self.layer_defs = layer_defs
self.batch_size = batch_size
self.stored_network = stored_network
self.name = name
self.layers_train, self.layers_inf = [], []
self.l1, self.l2_sqr = 0, 0
self.params, self.param_update_rules, self.n_params = [], [], 0
if type(layer_defs) != list:
raise TypeError("Please input a list of layer definitions.")
self.set_rng(random_state, theano_rng) # Sets instance attributes `self.rng` and `self.theano_rng`.
self.pickled_theano_rng = None # Use this to restore previous parameters.
# Define these Theano functions during the `compile` stage.
self.p_y_given_x = None
self.predict_proba = None
self.predict = None
if self.n_in is not None:
self._build_network(self.n_in, batch_size)
def _build_network(self, n_in, batch_size=None):
"""Create and store the layers of this network, along with auxiliary information such
as lists of the trainable parameters in the network."""
self.n_in = np.atleast_1d(n_in) # Make sure that `n_in` is a list or tuple.
if batch_size is not None:
self.batch_size = batch_size
# The next few attributes create and store Theano shared variables.
# The Layers contain shared variables for all the trainable parameters,
# and the regularization parameters are sums and products of the parameters.
self.layers_train = self._build_layers_train(self.layer_defs, self.stored_network)
self.layers_inf = self._duplicate_layer_stack(self.layers_train)
self.l1, self.l2_sqr = self._get_regularization(self.layers_train)
# Collect the trainable parameters from each layer and arrange them into lists.
self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train)
log.info("This network has {} trainable parameters.".format(self.n_params))
def _arrange_parameters(self, layers):
"""Extract all trainable parameters and any special update rules from each Layer.
Also calculate the total number of trainable parameters in this network.
Returns
-------
A 3-tuple of (parameters, parameter update rules, and number of parameters).
The first two elements are lists of equal length, and the number of parameters is
an integer.
"""
# The parameters of the model are the parameters of the two layers it is made out of.
params, param_update_rules = [], []
for ly in layers:
params += ly.params
param_update_rules += ly.param_update_rules
# Calculate the total number of trainable parameters in this network.
n_params = int(np.sum([np.sum([np.prod(param.get_value().shape) for param in layer.params])
for layer in layers if not getattr(layer, "fix_params", False)]))
return params, param_update_rules, n_params
def _get_regularization(self, layers):
"""Find the L1 and L2 regularization terms for this net. Combine the L1 and L2
terms from each Layer. Use the regularization strengths stored in each Layer.
Note that the value returned is `l2_sqr`, the sum of squares of all weights,
times the lambda parameter for each Layer.
Returns
-------
l1, l2_sqr : theano.shared
The `l1` is the sum of absolute values of weights times
lambda_l1 from each Layer, and `l2_sqr` is the sum of squares
of weights times lambda_l2 from each Layer.
"""
# L1 norm; one regularization option is to require the L1 norm to be small.
l1 = np.sum([ly.l1 for ly in layers if ly.l1 is not None])
if not l1:
log.debug("No L1 regularization in this model.")
l1 = theano.shared(np.cast[theano.config.floatX](0), "zero")
# Square of the L2 norm; one regularization option is to require the
# square of the L2 norm to be small.
l2_sqr = np.sum([ly.l2_sqr for ly in layers if ly.l2_sqr is not None])
if not l2_sqr:
log.debug("No L2 regularization in this model.")
l2_sqr = theano.shared(np.cast[theano.config.floatX](0), "zero")
return l1, l2_sqr
def _build_layers_train(self, layer_defs, stored_network=None):
"""Creates a stack of neural network layers from the input layer definitions.
This network is intended for use in training.
**Parameters**
* `layer_defs` <list>
A list of Layer definitions. May contain Layers, in which case they're added
directly to the list of output Layers.
**Optional Parameters**
* `stored_network` <str|None>
A filename containing a previously stored neural network. If any layer definitions
specify that they should be initialized with weights from an existing network,
use the weights in the `stored_network`.
**Returns**
A list of initialized (but not compiled) neural network Layers.
**Modifies**
None
"""
if stored_network is not None:
log.info('Reading weights from an existing network at "{}".'.format(stored_network))
stored_network = collections.OrderedDict(files.read_pickle(stored_network)["params"])
log.info("Building the \"{}\" network.".format(self.name))
if isinstance(layer_defs[0], layers.InputLayer):
layer_objs = []
else:
# Initialize the layers with an input layer, if we don't have one already.
layer_objs = [layers.InputLayer(self.n_in, name="input")]
for ly in layer_defs:
if isinstance(ly, layers.Layer):
# If this is already a Layer object, don't try to re-create it.
layer_objs.append(ly)
else:
prev_ly = layer_objs[-1]
if len(ly) == 1:
ly.append({}) # No extra layer arguments.
layer_name = ly[0]
if not layer_name.endswith("Layer"):
# All class names end with "Layer".
layer_name += "Layer"
if ((layer_name.startswith("BC01ToC01B") or layer_name.startswith("C01BToBC01"))
and theano.config.device == "cpu"):
log.warning("Skipping \"{}\" reshuffling layer for "
"CPU training.".format(layer_name))
continue
layer_kwargs = ly[1].copy()
init_from = layer_kwargs.pop("load_params", False)
if init_from:
if stored_network is None or init_from not in stored_network:
raise ValueError("Couldn't find weights for layer {} in the input "
"weights.".format(init_from))
layer_type = getattr(layers, layer_name)
if "batch_size" in inspect.getargspec(layer_type.__init__).args:
layer_kwargs.setdefault("batch_size", self.batch_size)
layer_objs.append(layer_type(n_in=prev_ly.n_out, rng=self.rng,
theano_rng=self.theano_rng, **layer_kwargs))
log.info("Added layer: {}".format(str(layer_objs[-1])))
if init_from:
# Copy weights from the input file into this layer.
for param, input_params in zip(layer_objs[-1].params,
stored_network[init_from]):
param.set_value(input_params[1], borrow=True)
log.info("Copied input parameters from layer {} to layer "
"{}.".format(init_from, layer_objs[-1].name))
return layer_objs
def _duplicate_layer_stack(self, layer_stack):
"""Creates a stack of neural network Layers identical to the input `layer_stack`, and
with weights tied to those Layers. This is useful to, for example, create a parallel
network to be used for inference.
**Parameters**
* `layer_stack` <list of Layers>
A list of initialized Layers.
**Returns**
A list of initialized (but not compiled) neural network Layers.
**Modifies**
None
"""
layer_objs = []
for i_ly, ly in enumerate(layer_stack):
layer_type = type(ly)
layer_kwargs = ly.get_params()
# Construct a parallel network for inference. Tie the weights to the training network.
layer_kwargs.update(layer_stack[i_ly].get_trainable_params())
layer_objs.append(layer_type(rng=self.rng, theano_rng=self.theano_rng, **layer_kwargs))
return layer_objs
def get_loss(self, name, targets=None, inference=False, regularized=None):
"""Return a loss function.
Parameters
----------
name : str
Name of the loss function. One of ["nll", "error"]. May
also be a list, in which case this function will return
a list of loss functions.
targets : theano symbolic variable, optional
If None, will be initialized to a T.imatrix named "y".
inference : bool, optional
If True, return the loss from the inference network (for
e.g. model validation). Otherwise use the training network.
regularized : bool, optional
Add regularization parameters to the loss? Default to True
if `inference` is False and False if `inference` is True.
Returns
-------
Theano symbolic variable
Represents the requested loss, or a list of symbolic
variables if `name` is list-like.
"""
if self.input is None:
raise RuntimeError("Compile this network before getting a loss function.")
if regularized is None:
regularized = not inference
# If we got a list as input, return a list of loss functions.
if misc.is_listlike(name):
return [self.get_loss(n, targets=targets, inference=inference, regularized=regularized)
for n in name]
input_name = name
name = name.lower()
if name == "nll":
name = "negative_log_likelihood"
name = name.replace(" ", "_")
if inference:
output_layer = self.layers_inf[-1]
else:
output_layer = self.layers_train[-1]
# Look for the cost function in the output layer.
if not hasattr(output_layer, name):
raise ValueError("Unrecognized loss function: \"{}\".".format(input_name))
if targets is None:
targets = T.imatrix("y") # Labels, presented as 2D array of [int] labels
loss = getattr(output_layer, name)(targets)
if regularized:
loss = loss + self.l1 + self.l2_sqr
return loss
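# Illustrative sketch (added for clarity, not part of the original class):
# once the network has been compiled, training and validation losses can be
# requested together; `net` is a hypothetical compiled NNClassifier.
#
#     y = T.imatrix("y")
#     train_nll = net.get_loss("nll", targets=y)
#     valid_nll, valid_err = net.get_loss(["nll", "error"], targets=y, inference=True)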
def compile(self, input, recompile=False):
"""Compile the theano computation graphs and functions
associated with this network.
Parameters
----------
input : Theano symbolic variable
The input to the network
recompile : bool, optional
If False, will not recompile an already-compiled network.
"""
if self.input is not None:
if recompile:
log.warning("Recompiling and resetting the existing network.")
else:
log.debug("This object already compiled. Not recompiling.")
return
self.input = input
log.info("Compiling the \"{}\" training network.".format(self.name))
prev_output = input
for ly in self.layers_train:
ly.compile(prev_output)
ly.compile_activations(self.input)
prev_output = ly.output
log.info("Compiling the \"{}\" inference network.".format(self.name))
prev_output = input
for ly in self.layers_inf:
ly.compile(prev_output)
ly.compile_activations(self.input)
prev_output = ly.output_inf
# Allow predicting on fresh features.
self.p_y_given_x = self.layers_inf[-1].p_y_given_x
self.predict_proba = theano.function(inputs=[self.input], outputs=self.p_y_given_x)
self.predict = theano.function(inputs=[self.input], outputs=self.layers_inf[-1].y_pred)
# Now that we've compiled the network, we can restore a previous
# Theano RNG state, if any. The "pickled_theano_rng" will only be
# non-None if this object was unpickled.
self._set_theano_rng(self.pickled_theano_rng)
self.pickled_theano_rng = None
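# Illustrative sketch (added for clarity, not part of the original class):
# compilation needs the symbolic variable the graphs are built on; after that,
# the compiled prediction functions accept ordinary numpy arrays. `net` is a
# hypothetical NNClassifier constructed with `n_in` given, and
# `batch_of_features` a matching feature array.
#
#     x = T.matrix("x")
#     net.compile(x)
#     probs = net.predict_proba(batch_of_features)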
def get_init_params(self):
return dict(n_in=self.n_in, layer_defs=self.layer_defs,
name=self.name, batch_size=self.batch_size,
stored_network=self.stored_network)
def set_trainable_params(self, inp, layers=None):
"""Set the trainable parameters in this network from trainable
parameters in an input.
Parameters
----------
inp : NNClassifier or string
May be an existing NNClassifier, or a filename
pointing to either a checkpoint or a pickled NNClassifier.
layers : list of strings, optional
If provided, set parameters only for the layers with these
names, using layers with corresponding names in the input.
"""
# Get the input and check its type.
# If the input is a string, try reading it first as a
# checkpoint file, and then as a NNClassifier pickle.
if isinstance(inp, six.string_types):
try:
inp = files.checkpoint_read(inp, get_metadata=False)
except files.CheckpointError as err:
inp = files.read_pickle(inp)
if not isinstance(inp, NNClassifier):
raise TypeError("Unable to restore weights from a \"{}\" object.".format(type(inp)))
# Go through each layer in this object and set its weights.
for ly in self.layers_train:
if layers is not None and ly.name not in layers:
continue
if ly.has_trainable_params:
ly.set_trainable_params(inp.get_layer(ly.name))
log.debug("Set trainable parameters in layer {} "
"from input weights.".format(ly.name))
def get_layer(self, name, inf=False):
"""Returns the Layer object with the given name.
Parameters
----------
name : str
Name of the desired Layer object
inf : bool, optional
If True, search the inference (deterministic) Layers,
otherwise search the training Layers.
"""
layers = self.layers_inf if inf else self.layers_train
for ly in layers:
if ly.name == name:
return ly
else:
raise ValueError("Layer \"{}\" is not present in "
"network \"{}\".".format(name, self.name))
def set_rng(self, rng, theano_rng=None):
"""Set the pseudo-random number generator in this object
and in all Layers of this object.
Parameters
----------
rng : int or numpy.random.RandomState or `RandomState.get_state()`
theano_rng : theano.tensor.shared_randomstreams.RandomStreams, optional
If not supplied, will be initialized from the `rng`
Modifies
--------
`self.rng` and `self.theano_rng` will be set with RNGs.
Each Layer in `self.layers_train` and `self.layers_inf` will have their RNGs set
to be the same objects as this network's new RNGs.
"""
# Set up the random number generator, if necessary.
if rng is None:
log.debug("Making new NNet RNG")
rng = np.random.RandomState()
elif isinstance(rng, int):
# If we got a seed as input.
log.debug("Setting RNG seed to {}.".format(rng))
rng = np.random.RandomState(rng)
elif not isinstance(rng, np.random.RandomState):
# Assume that anything else is the state of the RNG.
log.debug("Initializing numpy RNG from previous state.")
rng_state = rng
rng = np.random.RandomState()
rng.set_state(rng_state)
if theano_rng is None:
log.debug("Initializing new Theano RNG.")
theano_rng = RandomStreams(rng.randint(2 ** 30))
self.rng = rng
self.theano_rng = theano_rng
for ly in self.layers_train + self.layers_inf:
ly.rng = self.rng
ly.theano_rng = self.theano_rng
def _set_theano_rng(self, rng_state=None):
"""Set the current state of the theano_rng from a pickled state.
.. note:: This can only be done after compiling the network! The Theano
RNG needs to see where it fits in to the graph.
http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs
"""
if rng_state is not None:
for (su, input_su) in zip(self.theano_rng.state_updates, rng_state):
su[0].set_value(input_su)
def __getstate__(self):
"""Preserve the object's state.
Don't try to pickle the Theano objects directly;
Theano changes quickly. Store the values of layer weights
as arrays instead (handled in the Layers' __getstate__ functions)
and clear all compiled functions and symbolic variables.
Those will need to be re-compiled before the network can be used again.
"""
state = self.__dict__.copy()
state["p_y_given_x"], state["predict_proba"], state["predict"] = None, None, None
state["l1"], state["l2_sqr"] = None, None
state["params"], state["param_update_rules"] = None, None
state["layers_inf"] = [] # This is redundant with `layers_train`; don't save both.
state["rng"] = self.rng.get_state()
state["input"] = None
# http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs
state["pickled_theano_rng"] = [su[0].get_value() for su in self.theano_rng.state_updates]
state["theano_rng"] = None
return state
def __setstate__(self, state):
"""Allow unpickling from stored weights.
"""
self.__dict__.update(state)
# Reconstruct this object's RNG.
# The theano_rng won't be completely reconstructed until we recompile the network.
self.set_rng(self.rng, self.theano_rng)
# Rebuild everything we had to take apart before saving. Note that we'll
# still need to call `compile` to make the network fully operational again.
self.layers_inf = self._duplicate_layer_stack(self.layers_train)
self.l1, self.l2_sqr = self._get_regularization(self.layers_train)
# Collect the trainable parameters from each layer and arrange them into lists.
self.params, self.param_update_rules, self.n_params = self._arrange_parameters(self.layers_train)
def fit(self, X, y=None, valid=None, test=None,
n_epochs=None, batch_size=None,
augmentation=None, checkpoint=None,
sgd_type="adadelta", lr_rule=None,
momentum_rule=None, sgd_max_grad_norm=None,
train_loss="nll", valid_loss="nll", test_loss=["error", "nll"],
validation_frequency=None, validate_on_train=False,
checkpoint_all=False, extra_metadata=None,):
"""Perform supervised training on the input data.
When restoring a pickled `NNClassifier` object to resume training,
data, augmentation functions, and checkpoint locations must be
re-entered, but other parameters will be taken from the previously
stored training state. (The `n_epochs` may be re-supplied to alter
the number of epochs used, but will default to the previously
supplied `n_epochs`.)
Training may be stopped early by pressing ctrl-C.
Training data may be provided in either of the following formats:
- An array of (n_examples, n_features) in the first positional
argument (keyed by `X`), and an array of (n_examples, n_labels)
in the second positional argument (keyed by `y`)
- An object of type `readers.DataWithHoldoutParitions` or `readers.Data`
presented in the first positional argument
Validation data may be optionally supplied with the `valid` key
in one of the following formats (only if the training data were not
given as a `readers.DataWithHoldoutParitions` object):
- A tuple of (X, y), where `X` is an array of
(n_validation_examples, n_features) and `y` is an array of
(n_validation_examples, n_labels)
- A `readers.Data` object
- A float in the range [0, 1), in which case validation data will
be held out from the supplied training data (only if training
data were given as an array)
Test data may be optionally supplied with the `test` key, using the same
formats as for validation data.
Parameters
----------
X, y, valid, test
See above for discussion of allowed input formats.
n_epochs : int
Train for this many epochs. (An "epoch" is one complete pass through
the training data.) Must be supplied unless resuming training.
batch_size : int
Number of examples in a minibatch. Must be provided if it was
not given during object construction.
augmentation : function, optional
Apply this function to each minibatch of training data.
checkpoint : str, optional
Filename for storing network during training. If supplied,
Arignote will store the network after every epoch, as well
as storing the network with the best validation loss and
the final network. When using a checkpoint, the trainer
will restore the network with best validation loss at the
end of training.
sgd_type : {"adadelta", "nag", "adagrad", "rmsprop", "sgd"}
Choice for stochastic gradient descent algorithm to use in training
lr_rule, momentum_rule : dict of sgd_updates.Rule params, optional
Use these dictionaries of parameters to create Rule objects
which describe how to alter the learning rate and momentum
during training.
train_loss, valid_loss : {"nll", "error"}
Loss function for training and validation. With a custom
output layer, may also be the name of a function which returns
a theano symbolic variable giving the cost.
("nll" = "negative log likelihood")
test_loss : str or list
May be any of the loss functions usable for training, or
a list of such functions.
Other Parameters
----------------
sgd_max_grad_norm : float, optional
If provided, scale gradients during training so that the norm
of all gradients is no more than this value.
validation_frequency : int, optional
Check the validation loss after training on this many examples.
Defaults to validating once per epoch.
validate_on_train : bool, optional
If set, calculate validation loss (using the deterministic
network) on the training set as well.
checkpoint_all : bool, optional
Keep the state of the network at every training step.
Warning: may use lots of hard drive space.
extra_metadata : dict, optional
Store these keys with the pickled object.
Returns
-------
self : NNClassifier
Examples
--------
>>> lr_rule = {"rule": "stalled", "initial_value": 0.1, "multiply_by": 0.25, "interval": 5}
>>> momentum_rule = {"rule": "stalled", "initial_value": 0.7, "decrease_by": -0.1,
"final_value": 0.95, "interval": 5}
>>> mnist_data = files.read_pickle(sample_data.mnist)
>>> classifier.fit(mnist_data[0], n_epochs=50, valid=mnist_data[1], test=mnist_data[2],
augmentation=None, checkpoint=checkpoint, sgd_type="nag",
lr_rule=lr_rule, momentum_rule=momentum_rule, batch_size=128,
train_loss="nll", valid_loss="nll", test_loss=["nll", "error"])
"""
if batch_size is None:
batch_size = self.batch_size
# If the inputs are not `Data` objects, we need to wrap them before
# the Trainer can make use of them.
train_data = X if y is None else (X, y)
train, valid, test = readers.to_data_partitions(train_data, valid, test, batch_size=batch_size)
# If we didn't previously know how many features to expect in the input, we can now
# build the layers of this neural network.
if self.n_in is None:
self._build_network(train.features.shape, batch_size=batch_size)
if self.trainer is not None:
trainer = self.trainer
else:
trainer = training.SupervisedTraining(sgd_type=sgd_type,
lr_rule=lr_rule,
momentum_rule=momentum_rule,
sgd_max_grad_norm=sgd_max_grad_norm,
max_epochs=n_epochs,
validation_frequency=validation_frequency,
validate_on_train=validate_on_train,
train_loss=train_loss,
valid_loss=valid_loss,
test_loss=test_loss)
self.trainer = trainer
trained_network = trainer.fit(self, train, n_epochs=n_epochs, valid=valid, test=test,
augmentation=augmentation, extra_metadata=extra_metadata,
checkpoint=checkpoint, checkpoint_all=checkpoint_all)
return trained_network
|
mit
| -1,436,817,007,690,568,200
| 42.929825
| 117
| 0.592202
| false
| 4.206194
| false
| false
| false
|
zeffii/BlenderLSystem3D
|
sverchok_script_node_version/3dlsystem.py
|
1
|
6593
|
import math
from math import radians
import random
from random import randint
import ast
import bmesh
import mathutils
from mathutils import Vector, Euler
"""
lifted from: http://www.4dsolutions.net/ocn/lsystems.html
"""
class Lturtle:
import mathutils
from mathutils import Vector, Euler
Xvec = Vector((1, 0, 0))
Yvec = Vector((0, 1, 0))
Zvec = Vector((0, 0, 1))
# looking down on YX axis. Z is vertical.
stackstate = [] # remembers saved state
delta = 0.2 # angle of rotation
length = 0.5 # full length of turtle move
thickness = 0.02 # default thickness of cylinder
instrudict = {
'+': 'turnleft',
'-': 'turnright',
'&': 'pitchdown',
'^': 'pitchup',
'<': 'leftroll',
'>': 'rightroll',
'[': 'storeloc_rot',
']': 'restoreloc_rot',
'%': 'roll180',
'$': 'rollhoriz',
'x': 'randturn',
't': 'gravity',
'F': 'fdraw',
'f': 'fnodraw',
'Z': 'halfdraw',
'z': 'halfnodraw',
'g': 'Fnorecord',
'.': 'Nomove'
}
stored_states = []
verts = []
edges = []
def __init__(self, vPos=Vector((0, 0, 0))):
self.vHeading = Vector((0, 0, 1))
self.vPos = vPos
self.delta = 0.2
self.amp = 1.0
def chomp(self, instructions):
getparam = 0
checkparam = 0
param = ""
for item in instructions:
if getparam:
if item == ")":
getparam = 0 # done getting
command = command + "(" + param + ")"
eval(command)
continue
else:
param = param + item # building parameter
continue
if checkparam: # checking for parameter?
checkparam = 0
if item == "(":
param = ""
getparam = 1 # parameter exists
continue
else:
command = command + "()" # no parameter
eval(command)
# initializing command string
command = "self." + self.instrudict.get(item, 'notHandled')
checkparam = 1 # set flag
else: # dealing with last item
if checkparam:
command = command + "()" # no parameter
eval(command)
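# Illustrative sketch (added for clarity, not part of the original script):
# chomp() walks the instruction string one symbol at a time; a symbol may
# carry a parameter in parentheses, e.g. "+(45)" turns left by 45 degrees.
#
#     t = Lturtle()
#     t.chomp("F+(45)F[+F]-F")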
def add_edge(self):
i = len(self.verts)
self.edges.append([i - 2, i - 1])
def add_verts(self, amp=1):
self.verts.append(self.vPos[:])
self.vPos = self.vPos + (self.vHeading * self.length * amp)
self.verts.append(self.vPos[:])
def fnodraw(self, n=""):
self.vPos = self.vPos + self.vHeading * self.length
print("Forward %s (no draw)" % n)
def halfnodraw(self, n=""):
self.vPos = self.vPos + (self.vHeading * self.length * 0.5)
print("half no draw %s" % n)
def fdraw(self, n=""):
self.add_verts()
self.add_edge()
print("fdraw %s" % n)
def halfdraw(self, n=""):
self.add_verts(amp=0.5)
self.add_edge()
print("half draw %s" % n)
# Turning, Pitch, Roll
def storeloc_rot(self, n=""):
self.stored_states.append([self.vPos, self.vHeading])
print("Store rotation and location %s" % n)
def restoreloc_rot(self, n=""):
if len(self.stored_states) > 0:
self.vPos, self.vHeading = self.stored_states.pop()
print("Restore rotation and location %s" % n)
else:
print('tried restore loc/rot but stored states was empty. you suck :)')
def do_rotation(self, sign, axis, n=""):
""" Rotate vHeading by `sign` * delta degrees around `axis` (0=x, 1=y, 2=z). """
if n:
self.delta = float(n)
components = [0, 0, 0]
components[axis] = sign * radians(self.delta) * self.amp
myEul = Euler(components, 'XYZ')
self.vHeading.rotate(myEul)
def turnleft(self, n=""):
self.do_rotation(1, 2, n)
print("Turn Left around Z axis %s" % n)
def turnright(self, n=""):
self.do_rotation(-1, 2, n)
print("Turn Right around Z axis %s" % n)
def pitchdown(self, n=""):
self.do_rotation(1, 1, n)
print("Pitch down %s" % n)
def pitchup(self, n=""):
self.do_rotation(-1, 1, n)
print("Pitch up %s" % n)
def leftroll(self, n=""):
self.do_rotation(1, 0, n)
print("left roll %s" % n)
def rightroll(self, n=""):
self.do_rotation(-1, 0, n)
print("right roll %s" % n)
def turn180(self, n=""):
self.do_rotation(-1, 2, 180)
print("turn180 %s" % n)
def roll180(self, n=""):
self.do_rotation(1, 0, 180)
print("roll180 %s" % n)
def rollhoriz(self, n=""):
# not exactly sure what this command was intended to do but how
# about resetting to vertical.
self.vHeading = Vector((0, 0, 1))
print("roll horiz %s" % n)
def randturn(self, n=""):
ax_x = radians(randint(0, 360))
ax_y = radians(randint(0, 360))
ax_z = radians(randint(0, 360))
myEul = Euler((ax_x, ax_y, ax_z), 'XYZ')
self.vHeading.rotate(myEul)
print("randturn %s" % n)
def gravity(self, n=""):
print("not handled yet")
print("gravity %s" % n)
def Fnorecord(self, n=""):
print("Fnorecord %s" % n)
def Nomove(self, n=""):
print("No move %s" % n)
def notHandled(self, n=""):
print("Not handled %s" % n)
def sv_main(t_angle=0.2):
verts_out = []
edges_out = []
in_sockets = [
['s', 't_angle', t_angle]
]
def produce(axiom, rules):
output = ""
for i in axiom:
output = output + rules.get(i, i)
return output
def iterate(n, axiom, rules):
if n > 0:
axiom = produce(axiom, rules)
return iterate(n - 1, axiom, rules)
return axiom
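# Illustrative sketch (added for clarity, not part of the original node):
# `rules` maps a symbol to its replacement string and iterate() rewrites the
# axiom that many times. A hypothetical RULES text block could hold
#
#     {'F': 'F[+F]F[-F]F'}
#
# so that iterate(1, 'F', rules) yields 'F[+F]F[-F]F', which chomp() can then
# draw.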
texts = bpy.data.texts
f = texts['RULES'].as_string()
rules = {}
rules = ast.literal_eval(f)
axiom = 'I'
m = iterate(5, axiom, rules)
ffff = 'poonjab' in globals()
poonjab = Lturtle()
poonjab.verts = []
poonjab.edges = []
poonjab.amp = t_angle
poonjab.chomp(m)
verts_out.extend(poonjab.verts)
edges_out.extend(poonjab.edges)
out_sockets = [
['v', 'verts', [verts_out]],
['s', 'edges', [edges_out]]
]
return in_sockets, out_sockets
|
gpl-3.0
| -1,039,659,748,590,810,900
| 25.162698
| 83
| 0.502654
| false
| 3.423157
| false
| false
| false
|
apple/swift-lldb
|
packages/Python/lldbsuite/test/commands/watchpoints/hello_watchlocation/TestWatchLocation.py
|
1
|
4428
|
"""
Test lldb watchpoint that uses '-s size' to watch a pointed location with size.
"""
from __future__ import print_function
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class HelloWatchLocationTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Our simple source filename.
self.source = 'main.cpp'
# Find the line number to break inside main().
self.line = line_number(
self.source, '// Set break point at this line.')
# This is for verifying that watch location works.
self.violating_func = "do_bad_thing_with_location"
# Build dictionary to have unique executable names for each test
# method.
self.exe_name = self.testMethodName
self.d = {'CXX_SOURCES': self.source, 'EXE': self.exe_name}
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24446: WINDOWS XFAIL TRIAGE - Watchpoints not supported on Windows")
# Most of the MIPS boards provide only one H/W watchpoint, and S/W
# watchpoints are not supported yet
@expectedFailureAll(triple=re.compile('^mips'))
# SystemZ and PowerPC also currently support only one H/W watchpoint
@expectedFailureAll(archs=['powerpc64le', 's390x'])
@expectedFailureNetBSD
@skipIfDarwin
def test_hello_watchlocation(self):
"""Test watching a location with '-s size' option."""
self.build(dictionary=self.d)
self.setTearDownCleanup(dictionary=self.d)
exe = self.getBuildArtifact(self.exe_name)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add a breakpoint to set a watchpoint when stopped on the breakpoint.
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=False)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# We should be stopped again due to the breakpoint.
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# Now let's set a write-type watchpoint pointed to by 'g_char_ptr'.
self.expect(
"watchpoint set expression -w write -s 1 -- g_char_ptr",
WATCHPOINT_CREATED,
substrs=[
'Watchpoint created',
'size = 1',
'type = w'])
# Get a hold of the watchpoint id just created, it is used later on to
# match the watchpoint id which is expected to be fired.
match = re.match(
"Watchpoint created: Watchpoint (.*):",
self.res.GetOutput().splitlines()[0])
if match:
expected_wp_id = int(match.group(1), 0)
else:
self.fail("Grokking watchpoint id faailed!")
self.runCmd("expr unsigned val = *g_char_ptr; val")
self.expect(self.res.GetOutput().splitlines()[0], exe=False,
endstr=' = 0')
self.runCmd("watchpoint set expression -w write -s 4 -- &threads[0]")
# Use the '-v' option to do verbose listing of the watchpoint.
# The hit count should be 0 initially.
self.expect("watchpoint list -v",
substrs=['hit_count = 0'])
self.runCmd("process continue")
# We should be stopped again due to the watchpoint (write type), but
# only once. The stop reason of the thread should be watchpoint.
self.expect("thread list", STOPPED_DUE_TO_WATCHPOINT,
substrs=['stopped',
'stop reason = watchpoint %d' % expected_wp_id])
# Switch to the thread stopped due to watchpoint and issue some
# commands.
self.switch_to_thread_with_stop_reason(lldb.eStopReasonWatchpoint)
self.runCmd("thread backtrace")
self.expect("frame info",
substrs=[self.violating_func])
# Use the '-v' option to do verbose listing of the watchpoint.
# The hit count should now be 1.
self.expect("watchpoint list -v",
substrs=['hit_count = 1'])
self.runCmd("thread backtrace all")
|
apache-2.0
| -3,877,249,198,937,931,000
| 38.185841
| 98
| 0.610659
| false
| 3.982014
| true
| false
| false
|
hoburg/gpkit
|
docs/source/examples/boundschecking.py
|
1
|
1592
|
"Verifies that bounds are caught through monomials"
from gpkit import Model, parse_variables
from gpkit.exceptions import UnboundedGP, UnknownInfeasible
class BoundsChecking(Model):
"""Implements a crazy set of unbounded variables.
Variables
---------
Ap [-] d
D [-] e
F [-] s
mi [-] c
mf [-] r
T [-] i
nu [-] p
Fs 0.9 [-] t
mb 0.4 [-] i
rf 0.01 [-] o
V 300 [-] n
Upper Unbounded
---------------
F
Lower Unbounded
---------------
D
"""
@parse_variables(__doc__, globals())
def setup(self):
self.cost = F
return [
F >= D + T,
D == rf*V**2*Ap,
Ap == nu,
T == mf*V,
mf >= mi + mb,
mf == rf*V,
Fs <= mi
]
m = BoundsChecking()
print(m.str_without(["lineage"]))
try:
m.solve()
except UnboundedGP:
gp = m.gp(checkbounds=False)
missingbounds = gp.check_bounds()
try:
sol = gp.solve(verbosity=0) # Errors on mosek_cli
except UnknownInfeasible: # pragma: no cover
pass
bpl = ", but would gain it from any of these sets: "
assert missingbounds[(m.D.key, 'lower')] == bpl + "[(%s, 'lower')]" % m.Ap
assert missingbounds[(m.nu.key, 'lower')] == bpl + "[(%s, 'lower')]" % m.Ap
# ordering is arbitrary:
assert missingbounds[(m.Ap.key, 'lower')] in (
bpl + ("[(%s, 'lower')] or [(%s, 'lower')]" % (m.D, m.nu)),
bpl + ("[(%s, 'lower')] or [(%s, 'lower')]" % (m.nu, m.D)))
|
mit
| 1,190,010,389,241,384,700
| 23.492308
| 75
| 0.476131
| false
| 3.109375
| false
| false
| false
|
daniel-noland/MemoryOracle
|
gdbwatch/gdbtest/mem/DynamicBreak.py
|
1
|
3858
|
#!/usr/bin/env python
# -*- encoding UTF-8 -*-
# THIS CODE DERIVED FROM cma.py
import gdb
import signal
import re
import threading
from .Heap import Heap
#-----------------------------------------------------------------------
#Archs
# TODO: Update all arch classes to use gdb.Architecture checks instead of this
# hack
class Arch(object):
class x86_32(object):
@staticmethod
def is_current():
if gdb.execute("info reg", True, True).find("eax") >= 0:
return True
return False
@staticmethod
def get_arg(num):
if num > 1:
raise Exception("get_arg %d is not supported." %num)
gdb.execute("up", False, True)
ret = long(gdb.parse_and_eval(
"*(unsigned int *)($esp + " + str(num * 4) + ")")
)
gdb.execute("down", False, True)
return ret
@staticmethod
def get_ret():
return long(gdb.parse_and_eval("$eax"))
class x86_64(object):
@staticmethod
def is_current():
return gdb.newest_frame().architecture().name() == "i386:x86-64"
@staticmethod
def get_arg(num):
if num == 0:
return long(gdb.newest_frame().read_register("rdi"))
elif num == 1:
return long(gdb.newest_frame().read_register("rsi"))
else:
raise Exception("get_arg %d is not supported." %num)
@staticmethod
def get_ret():
return long(gdb.newest_frame().read_register("rax"))
class arm(object):
@staticmethod
def is_current():
if gdb.execute("info reg", True, True).find("cpsr") >= 0:
return True
return False
@staticmethod
def get_arg(num):
if num == 0:
return long(gdb.parse_and_eval("$r0"))
elif num == 1:
return long(gdb.parse_and_eval("$r1"))
else:
raise Exception("get_arg %d is not supported." %num)
@staticmethod
def get_ret():
return long(gdb.parse_and_eval("$r0"))
archs = (Arch.x86_32, Arch.x86_64, Arch.arm)
current = None
for e in Arch.archs:
if e.is_current():
Arch.current = e
break
else:
raise Exception("Current architecture is not supported by MemoryOracle.")
arch = Arch.current
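# Illustrative sketch (added for clarity, not part of the original module):
# once the current architecture is resolved, argument and return-value reads
# are uniform, e.g. while stopped at the entry of an allocator:
#
#     requested_size = arch.get_arg(0)
#     # ... and after the corresponding FinishBreakpoint fires:
#     returned_ptr = arch.get_ret()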
class BreakException(Exception):
pass
class DynamicBreak(gdb.Breakpoint):
@staticmethod
def _heap_track(ret, size):
print("_tracked ", ret, size)
gdb.execute("echo " + str(size) )
not_released_add(ret, size)
@staticmethod
def _heap_release():
print("_released ", arch.get_arg(0))
released_add(arch.get_arg(0))
class DynamicBreakAlloc(DynamicBreak):
allocs = dict()
def stop(self):
size = arch.get_arg(0)
fin = DynamicBreakAllocFinish()
return False
class DynamicBreakAllocFinish(gdb.FinishBreakpoint):
def stop(self):
print("finish return " + str(hex(arch.get_ret())))
return False
class DynamicBreakCalloc(DynamicBreak):
def event(self):
size = arch.get_arg(0) * arch.get_arg(1)
DynamicBreak._disable_finish_enable()
self._heap_track(arch.get_ret(), size)
class DynamicBreakRealloc(DynamicBreak):
def event(self):
super()._heap_release()
size = arch.get_arg(1)
DynamicBreak._disable_finish_enable()
super()._heap_track(arch.get_ret(), size)
class DynamicBreakRelease(DynamicBreak):
def event(self):
super()._heap_release()
DynamicBreak._disable_finish_enable()
b = DynamicBreakAlloc("operator new", gdb.BP_BREAKPOINT, gdb.WP_READ, True)
print("hello")
|
lgpl-3.0
| -5,980,768,733,821,877,000
| 24.215686
| 81
| 0.550544
| false
| 3.920732
| false
| false
| false
|
sha-red/django-shared-utils
|
shared/utils/fields.py
|
1
|
1882
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .text import slugify
# TODO Remove deprecated location
from .models.slugs import AutoSlugField
def uniquify_field_value(instance, field_name, value, max_length=None, queryset=None):
"""
Makes a char field value unique by appending an index, taking care of the
field's max length.
FIXME Doesn't work with model inheritance, where the field is part of the parent class.
"""
def get_similar_values(value):
return queryset.exclude(pk=instance.pk) \
.filter(**{"%s__istartswith" % field_name: value}).values_list(field_name, flat=True)
if not value:
raise ValueError("Cannot uniquify empty value")
# TODO Instead get value from instance.field, or use a default value?
if not max_length:
max_length = instance._meta.get_field(field_name).max_length
if not queryset:
queryset = instance._meta.default_manager.get_queryset()
# Find already existing counter
m = re.match(r'(.+)(-\d+)$', value)
if m:
base_value, counter = m.groups()
index = int(counter.strip("-")) + 1
else:
base_value = value
index = 2 # Begin appending "-2"
similar_values = get_similar_values(value)
while value in similar_values or len(value) > max_length:
value = "%s-%i" % (base_value, index)
if len(value) > max_length:
base_value = base_value[:-(len(value) - max_length)]
value = "%s-%i" % (base_value, index)
similar_values = get_similar_values(base_value)
index += 1
return value
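# Illustrative sketch (added for clarity, not part of the original module):
# if the queryset already contains "my-title" and "my-title-2", then
#
#     uniquify_field_value(instance, "slug", "my-title")
#
# returns "my-title-3", truncating the base value first whenever the suffixed
# value would exceed the field's max_length.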
# TODO Remove alias
def unique_slug(instance, slug_field, slug_value, max_length=50, queryset=None):
slug_value = slugify(slug_value)
return uniquify_field_value(instance, slug_field, slug_value, max_length=max_length, queryset=queryset)
|
mit
| -4,836,092,466,750,654,000
| 33.218182
| 97
| 0.644527
| false
| 3.668616
| false
| false
| false
|
gnarayan/WDmodel
|
WDmodel/main.py
|
1
|
11665
|
# -*- coding: UTF-8 -*-
"""
The WDmodel package is designed to infer the SED of DA white dwarfs given
spectra and photometry. This main module wraps all the other modules, and their
classes and methods to implement the algorithm.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import mpi4py
import numpy as np
from . import io
from . import WDmodel
from . import passband
from . import covariance
from . import fit
from . import viz
sys_excepthook = sys.excepthook
def mpi_excepthook(excepttype, exceptvalue, traceback):
"""
Overload :py:func:`sys.excepthook` when using :py:class:`mpi4py.MPI` to
terminate all MPI processes when an Exception is raised.
"""
sys_excepthook(excepttype, exceptvalue, traceback)
mpi4py.MPI.COMM_WORLD.Abort(1)
def main(inargs=None):
"""
Entry point for the :py:mod:`WDmodel` fitter package.
Parameters
----------
inargs : dict, optional
Input arguments to configure the fit. If not specified
:py:data:`sys.argv` is used. inargs must be parseable by
:py:func:`WDmodel.io.get_options`.
Raises
------
RuntimeError
If user attempts to resume the fit without having run it first
Notes
-----
The package is structured into several modules and classes
================================================= ===================
Module Model Component
================================================= ===================
:py:mod:`WDmodel.io` I/O methods
:py:class:`WDmodel.WDmodel.WDmodel` SED generator
:py:mod:`WDmodel.passband` Throughput model
:py:class:`WDmodel.covariance.WDmodel_CovModel` Noise model
:py:class:`WDmodel.likelihood.WDmodel_Likelihood` Likelihood function
:py:class:`WDmodel.likelihood.WDmodel_Posterior` Posterior function
:py:mod:`WDmodel.fit` "Fitting" methods
:py:mod:`WDmodel.viz` Viz methods
================================================= ===================
This method implements our algorithm to infer the DA White Dwarf properties
and construct the SED model given the data using the methods and classes
listed above. Once the data is read, the model is configured, and the
likelihood and posterior functions constructed, the fitter methods evaluate
the model parameters given the data, using the samplers in :py:mod:`emcee`.
:py:mod:`WDmodel.mossampler` provides an overloaded
:py:class:`emcee.PTSampler` with a more reliable auto-correlation estimate.
Finally, the result is output along with various plots.
"""
comm = mpi4py.MPI.COMM_WORLD
size = comm.Get_size()
if size > 1:
# force all MPI processes to terminate if we are running with --mpi and an exception is raised
sys.excepthook = mpi_excepthook
if inargs is None:
inargs = sys.argv[1:]
# parse the arguments
args, pool = io.get_options(inargs, comm)
specfile = args.specfile
spectable = args.spectable
lamshift = args.lamshift
vel = args.vel
bluelim, redlim = args.trimspec
rebin = args.rebin
rescale = args.rescale
blotch = args.blotch
outdir = args.outdir
outroot = args.outroot
photfile = args.photfile
rvmodel = args.reddeningmodel
phot_dispersion = args.phot_dispersion
pbfile = args.pbfile
excludepb = args.excludepb
ignorephot = args.ignorephot
covtype = args.covtype
coveps = args.coveps
samptype = args.samptype
ascale = args.ascale
ntemps = args.ntemps
nwalkers = args.nwalkers
nburnin = args.nburnin
nprod = args.nprod
everyn = args.everyn
thin = args.thin
redo = args.redo
resume = args.resume
discard = args.discard
balmer = args.balmerlines
ndraws = args.ndraws
savefig = args.savefig
##### SETUP #####
# set the object name and create output directories
objname, outdir = io.set_objname_outdir_for_specfile(specfile, outdir=outdir, outroot=outroot,\
redo=redo, resume=resume)
message = "Writing to outdir {}".format(outdir)
print(message)
# init the model
model = WDmodel.WDmodel(rvmodel=rvmodel)
if not resume:
# parse the parameter keywords in the argparse Namespace into a dictionary
params = io.get_params_from_argparse(args)
# get resolution - by default, this is None, since it depends on instrument settings for each spectrum
# we can look it up from a lookup table provided by Tom Matheson for our spectra
# a custom argument from the command line overrides the lookup
fwhm = params['fwhm']['value']
fwhm, lamshift = io.get_spectrum_resolution(specfile, spectable, fwhm=fwhm, lamshift=lamshift)
params['fwhm']['value'] = fwhm
# read spectrum
spec = io.read_spec(specfile)
# pre-process spectrum
out = fit.pre_process_spectrum(spec, bluelim, redlim, model, params,\
rebin=rebin, lamshift=lamshift, vel=vel, blotch=blotch, rescale=rescale)
spec, cont_model, linedata, continuumdata, scale_factor, params = out
# get photometry
if not ignorephot:
phot = io.get_phot_for_obj(objname, photfile)
else:
params['mu']['value'] = 0.
params['mu']['fixed'] = True
phot = None
# exclude passbands that we want excluded
pbnames = []
if phot is not None:
pbnames = np.unique(phot.pb)
if excludepb is not None:
pbnames = list(set(pbnames) - set(excludepb))
# filter the photometry recarray to use only the passbands we want
useind = [x for x, pb in enumerate(phot.pb) if pb in pbnames]
useind = np.array(useind)
phot = phot.take(useind)
# set the pbnames from the trimmed photometry recarray to preserve order
pbnames = list(phot.pb)
# if we cut out all the passbands, force mu to be fixed
if len(pbnames) == 0:
params['mu']['value'] = 0.
params['mu']['fixed'] = True
phot = None
# save the inputs to the fitter
outfile = io.get_outfile(outdir, specfile, '_inputs.hdf5', check=True, redo=redo, resume=resume)
io.write_fit_inputs(spec, phot, cont_model, linedata, continuumdata,\
rvmodel, covtype, coveps, phot_dispersion, scale_factor, outfile)
else:
outfile = io.get_outfile(outdir, specfile, '_inputs.hdf5', check=False, redo=redo, resume=resume)
try:
spec, cont_model, linedata, continuumdata, phot, fit_config = io.read_fit_inputs(outfile)
except IOError as e:
message = '{}\nMust run fit to generate inputs before attempting to resume'.format(e)
raise RuntimeError(message)
rvmodel = fit_config['rvmodel']
covtype = fit_config['covtype']
coveps = fit_config['coveps']
scale_factor = fit_config['scale_factor']
phot_dispersion = fit_config['phot_dispersion']
if phot is not None:
pbnames = list(phot.pb)
else:
pbnames = []
# get the throughput model
pbs = passband.get_pbmodel(pbnames, model, pbfile=pbfile)
##### MINUIT #####
outfile = io.get_outfile(outdir, specfile, '_params.json', check=True, redo=redo, resume=resume)
if not resume:
# to avoid minuit messing up inputs, it can be skipped entirely to force the MCMC to start at a specific position
if not args.skipminuit:
# do a quick fit to refine the input params
migrad_params = fit.quick_fit_spec_model(spec, model, params)
# save the minuit fit result - this will not be perfect, but if it's bad, refine starting position
viz.plot_minuit_spectrum_fit(spec, objname, outdir, specfile, scale_factor,\
model, migrad_params, save=True)
else:
# we didn't run minuit, so we'll assume the user intended to start us at some specific position
migrad_params = io.copy_params(params)
if covtype == 'White':
migrad_params['fsig']['value'] = 0.
migrad_params['fsig']['fixed'] = True
migrad_params['tau']['fixed'] = True
# If we don't have a user supplied initial guess of mu, get a guess
migrad_params = fit.hyper_param_guess(spec, phot, model, pbs, migrad_params)
# write out the migrad params - note that if you skipminuit, you are expected to provide the dl value
# if skipmcmc is set, you can now run the code with MPI
io.write_params(migrad_params, outfile)
else:
try:
migrad_params = io.read_params(outfile)
except (OSError,IOError) as e:
message = '{}\nMust run fit to generate inputs before attempting to resume'.format(e)
raise RuntimeError(message)
# init a covariance model instance that's used to model the residuals
# between the systematic residuals between data and model
errscale = np.median(spec.flux_err)
covmodel = covariance.WDmodel_CovModel(errscale, covtype, coveps)
##### MCMC #####
# skipmcmc can be run to just prepare the inputs
if not args.skipmcmc:
# do the fit
result = fit.fit_model(spec, phot, model, covmodel, pbs, migrad_params,\
objname, outdir, specfile,\
phot_dispersion=phot_dispersion,\
samptype=samptype, ascale=ascale,\
ntemps=ntemps, nwalkers=nwalkers, nburnin=nburnin, nprod=nprod,\
thin=thin, everyn=everyn,\
redo=redo, resume=resume,\
pool=pool)
param_names, samples, samples_lnprob, everyn, shape = result
ntemps, nwalkers, nprod, nparam = shape
mcmc_params = io.copy_params(migrad_params)
# parse the samples in the chain and get the result
result = fit.get_fit_params_from_samples(param_names, samples, samples_lnprob, mcmc_params,\
ntemps=ntemps, nwalkers=nwalkers, nprod=nprod, discard=discard)
mcmc_params, in_samp, in_lnprob = result
# write the result to a file
outfile = io.get_outfile(outdir, specfile, '_result.json')
io.write_params(mcmc_params, outfile)
# plot the MCMC output
plot_out = viz.plot_mcmc_model(spec, phot, linedata,\
scale_factor, phot_dispersion,\
objname, outdir, specfile,\
model, covmodel, cont_model, pbs,\
mcmc_params, param_names, in_samp, in_lnprob,\
covtype=covtype, balmer=balmer,\
ndraws=ndraws, everyn=everyn, savefig=savefig)
model_spec, full_mod, model_mags = plot_out
spec_model_file = io.get_outfile(outdir, specfile, '_spec_model.dat')
io.write_spectrum_model(spec, model_spec, spec_model_file)
full_model_file = io.get_outfile(outdir, specfile, '_full_model.hdf5')
io.write_full_model(full_mod, full_model_file)
if phot is not None:
phot_model_file = io.get_outfile(outdir, specfile, '_phot_model.dat')
io.write_phot_model(phot, model_mags, phot_model_file)
return
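# Illustrative sketch (added for clarity, not part of the original module):
# main() can be driven programmatically with an argv-style list instead of
# sys.argv. The option strings below are assumptions for illustration; the
# authoritative names live in io.get_options().
#
#     main(["--specfile", "spectra/myobj.flm", "--photfile", "phot/myobj.phot"])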
|
gpl-3.0
| 8,631,514,802,882,615,000
| 37.754153
| 121
| 0.61123
| false
| 3.733995
| true
| false
| false
|
drfraser/django-paypal
|
paypal/standard/forms.py
|
1
|
10801
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils import timezone
from paypal.standard.widgets import ValueHiddenInput, ReservedValueHiddenInput
from paypal.standard.conf import (POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT,
IMAGE, SUBSCRIPTION_IMAGE, DONATION_IMAGE,
SANDBOX_IMAGE, SUBSCRIPTION_SANDBOX_IMAGE, DONATION_SANDBOX_IMAGE)
log = logging.getLogger(__name__)
# 20:18:05 Jan 30, 2009 PST - PST timezone support is not included out of the box.
# PAYPAL_DATE_FORMAT = ("%H:%M:%S %b. %d, %Y PST", "%H:%M:%S %b %d, %Y PST",)
# PayPal dates have been spotted in the wild with these formats, beware!
PAYPAL_DATE_FORMATS = ["%H:%M:%S %b. %d, %Y PST",
"%H:%M:%S %b. %d, %Y PDT",
"%H:%M:%S %b %d, %Y PST",
"%H:%M:%S %b %d, %Y PDT",
"%H:%M:%S %d %b %Y PST", # IPN Tester
"%H:%M:%S %d %b %Y PDT", # formats
]
class PayPalDateTimeField(forms.DateTimeField):
input_formats = PAYPAL_DATE_FORMATS
def strptime(self, value, format):
dt = super(PayPalDateTimeField, self).strptime(value, format)
parts = format.split(" ")
if timezone.pytz and settings.USE_TZ:
if parts[-1] in ["PDT", "PST"]:
# PST/PDT is 'US/Pacific'
dt = timezone.make_aware(dt, timezone.pytz.timezone('US/Pacific'))
return dt
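# Illustrative sketch (added for clarity, not part of the original module):
# PayPal posts timestamps such as "20:18:05 Jan. 30, 2009 PST"; with Django
# settings configured and USE_TZ enabled, the field parses them against the
# formats above and attaches the US/Pacific timezone.
#
#     field = PayPalDateTimeField()
#     dt = field.clean("20:18:05 Jan. 30, 2009 PST")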
class PayPalPaymentsForm(forms.Form):
"""
Creates a PayPal Payments Standard "Buy It Now" button, configured for
selling a single item with no shipping.
For a full overview of all the fields you can set (there is a lot!) see:
http://tinyurl.com/pps-integration
Usage:
>>> f = PayPalPaymentsForm(initial={'item_name':'Widget 001', ...})
>>> f.render()
u'<form action="https://www.paypal.com/cgi-bin/webscr" method="post"> ...'
"""
CMD_CHOICES = (
("_xclick", "Buy now or Donations"),
("_donations", "Donations"),
("_cart", "Shopping cart"),
("_xclick-subscriptions", "Subscribe")
)
SHIPPING_CHOICES = ((1, "No shipping"), (0, "Shipping"))
NO_NOTE_CHOICES = ((1, "No Note"), (0, "Include Note"))
RECURRING_PAYMENT_CHOICES = (
(1, "Subscription Payments Recur"),
(0, "Subscription payments do not recur")
)
REATTEMPT_ON_FAIL_CHOICES = (
(1, "reattempt billing on Failure"),
(0, "Do Not reattempt on failure")
)
BUY = 'buy'
SUBSCRIBE = 'subscribe'
DONATE = 'donate'
# Where the money goes.
business = forms.CharField(widget=ValueHiddenInput(), initial=settings.PAYPAL_RECEIVER_EMAIL)
# Item information.
amount = forms.IntegerField(widget=ValueHiddenInput())
item_name = forms.CharField(widget=ValueHiddenInput())
item_number = forms.CharField(widget=ValueHiddenInput())
quantity = forms.CharField(widget=ValueHiddenInput())
# Subscription Related.
a1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 Price
p1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 Duration
t1 = forms.CharField(widget=ValueHiddenInput()) # Trial 1 unit of Duration, default to Month
a2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 Price
p2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 Duration
t2 = forms.CharField(widget=ValueHiddenInput()) # Trial 2 unit of Duration, default to Month
a3 = forms.CharField(widget=ValueHiddenInput()) # Subscription Price
p3 = forms.CharField(widget=ValueHiddenInput()) # Subscription Duration
t3 = forms.CharField(widget=ValueHiddenInput()) # Subscription unit of Duration, default to Month
src = forms.CharField(widget=ValueHiddenInput()) # Is billing recurring? default to yes
sra = forms.CharField(widget=ValueHiddenInput()) # Reattempt billing on failed cc transaction
no_note = forms.CharField(widget=ValueHiddenInput())
# Can be either 1 or 2. 1 = modify or allow new subscription creation, 2 = modify only
modify = forms.IntegerField(widget=ValueHiddenInput()) # Are we modifying an existing subscription?
# Localization / PayPal Setup
lc = forms.CharField(widget=ValueHiddenInput())
page_style = forms.CharField(widget=ValueHiddenInput())
cbt = forms.CharField(widget=ValueHiddenInput())
# IPN control.
notify_url = forms.CharField(widget=ValueHiddenInput())
cancel_return = forms.CharField(widget=ValueHiddenInput())
return_url = forms.CharField(widget=ReservedValueHiddenInput(attrs={"name": "return"}))
custom = forms.CharField(widget=ValueHiddenInput())
invoice = forms.CharField(widget=ValueHiddenInput())
# Default fields.
cmd = forms.ChoiceField(widget=forms.HiddenInput(), initial=CMD_CHOICES[0][0])
charset = forms.CharField(widget=forms.HiddenInput(), initial="utf-8")
currency_code = forms.CharField(widget=forms.HiddenInput(), initial="USD")
no_shipping = forms.ChoiceField(widget=forms.HiddenInput(), choices=SHIPPING_CHOICES,
initial=SHIPPING_CHOICES[0][0])
def __init__(self, button_type="buy", *args, **kwargs):
super(PayPalPaymentsForm, self).__init__(*args, **kwargs)
self.button_type = button_type
if 'initial' in kwargs:
# Dynamically create, so we can support everything PayPal does.
for k, v in kwargs['initial'].items():
if k not in self.base_fields:
self.fields[k] = forms.CharField(label=k, widget=ValueHiddenInput(), initial=v)
def test_mode(self):
return getattr(settings, 'PAYPAL_TEST', True)
def get_endpoint(self):
"Returns the endpoint url for the form."
if self.test_mode():
return SANDBOX_POSTBACK_ENDPOINT
else:
return POSTBACK_ENDPOINT
def render(self):
return mark_safe(u"""<form action="%s" method="post">
%s
<input type="image" src="%s" border="0" name="submit" alt="Buy it Now" />
</form>""" % (self.get_endpoint(), self.as_p(), self.get_image()))
def sandbox(self):
"Deprecated. Use self.render() instead."
import warnings
warnings.warn("""PaypalPaymentsForm.sandbox() is deprecated.
Use the render() method instead.""", DeprecationWarning)
return self.render()
def get_image(self):
return {
(True, self.SUBSCRIBE): SUBSCRIPTION_SANDBOX_IMAGE,
(True, self.BUY): SANDBOX_IMAGE,
(True, self.DONATE): DONATION_SANDBOX_IMAGE,
(False, self.SUBSCRIBE): SUBSCRIPTION_IMAGE,
(False, self.BUY): IMAGE,
(False, self.DONATE): DONATION_IMAGE,
}[self.test_mode(), self.button_type]
def is_transaction(self):
return not self.is_subscription()
def is_donation(self):
return self.button_type == self.DONATE
def is_subscription(self):
return self.button_type == self.SUBSCRIBE
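# Illustrative usage sketch (item data and URLs below are hypothetical;
# PAYPAL_RECEIVER_EMAIL must be configured in settings):
#
#   paypal_dict = {
#       "amount": "10",
#       "item_name": "Widget 001",
#       "invoice": "unique-invoice-id",
#       "notify_url": "https://example.com/paypal/ipn/",
#       "return_url": "https://example.com/paypal/done/",
#       "cancel_return": "https://example.com/paypal/cancel/",
#   }
#   form = PayPalPaymentsForm(initial=paypal_dict)
#   # In a template, {{ form.render }} emits the hidden-input form and button.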
class PayPalEncryptedPaymentsForm(PayPalPaymentsForm):
"""
Creates a PayPal Encrypted Payments "Buy It Now" button.
Requires the M2Crypto package.
Based on example at:
http://blog.mauveweb.co.uk/2007/10/10/paypal-with-django/
"""
def _encrypt(self):
"""Use your key thing to encrypt things."""
from M2Crypto import BIO, SMIME, X509
# @@@ Could we move this to conf.py?
CERT = settings.PAYPAL_PRIVATE_CERT
PUB_CERT = settings.PAYPAL_PUBLIC_CERT
PAYPAL_CERT = settings.PAYPAL_CERT
CERT_ID = settings.PAYPAL_CERT_ID
# Iterate through the fields and pull out the ones that have a value.
plaintext = 'cert_id=%s\n' % CERT_ID
for name, field in self.fields.items():
value = None
if name in self.initial:
value = self.initial[name]
elif field.initial is not None:
value = field.initial
if value is not None:
# @@@ Make this less hackish and put it in the widget.
if name == "return_url":
name = "return"
plaintext += u'%s=%s\n' % (name, value)
plaintext = plaintext.encode('utf-8')
# Begin crypto weirdness.
s = SMIME.SMIME()
s.load_key_bio(BIO.openfile(CERT), BIO.openfile(PUB_CERT))
p7 = s.sign(BIO.MemoryBuffer(plaintext), flags=SMIME.PKCS7_BINARY)
x509 = X509.load_cert_bio(BIO.openfile(PAYPAL_CERT))
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp = BIO.MemoryBuffer()
p7.write_der(tmp)
p7 = s.encrypt(tmp, flags=SMIME.PKCS7_BINARY)
out = BIO.MemoryBuffer()
p7.write(out)
return out.read()
def as_p(self):
return mark_safe(u"""
<input type="hidden" name="cmd" value="_s-xclick" />
<input type="hidden" name="encrypted" value="%s" />
""" % self._encrypt())
class PayPalSharedSecretEncryptedPaymentsForm(PayPalEncryptedPaymentsForm):
"""
Creates a PayPal Encrypted Payments "Buy It Now" button with a Shared Secret.
Shared secrets should only be used when your IPN endpoint is on HTTPS.
Adds a secret to the notify_url based on the contents of the form.
"""
def __init__(self, *args, **kwargs):
"Make the secret from the form initial data and slip it into the form."
from paypal.standard.helpers import make_secret
super(PayPalSharedSecretEncryptedPaymentsForm, self).__init__(*args, **kwargs)
# @@@ Attach the secret parameter in a way that is safe for other query params.
secret_param = "?secret=%s" % make_secret(self)
# Initial data used in form construction overrides defaults
if 'notify_url' in self.initial:
self.initial['notify_url'] += secret_param
else:
self.fields['notify_url'].initial += secret_param
class PayPalStandardBaseForm(forms.ModelForm):
"""Form used to receive and record PayPal IPN/PDT."""
# PayPal dates have non-standard formats.
time_created = PayPalDateTimeField(required=False)
payment_date = PayPalDateTimeField(required=False)
next_payment_date = PayPalDateTimeField(required=False)
subscr_date = PayPalDateTimeField(required=False)
subscr_effective = PayPalDateTimeField(required=False)
retry_at = PayPalDateTimeField(required=False)
case_creation_date = PayPalDateTimeField(required=False)
auction_closing_date = PayPalDateTimeField(required=False)
|
mit
| -8,756,174,441,328,358,000
| 39.912879
| 103
| 0.634386
| false
| 3.738664
| false
| false
| false
|
CaliopeProject/CaliopeServer
|
src/cid/forms/siim2/Company/models.py
|
1
|
1821
|
# -*- encoding: utf-8 -*-
"""
@authors: Nelson Daniel Ochoa ndaniel8a@gmail.com
Sebastián Ortiz V. neoecos@gmail.com
@license: GNU AFFERO GENERAL PUBLIC LICENSE
SIIM Models are the data definition of SIIM2 Framework
Copyright (C) 2013 Infometrika Ltda.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#Caliope Entities
from cid.core.forms import FormNode
from cid.core.entities import (VersionedNode,
ZeroOrMore,
RelationshipTo,
StringProperty)
class Company(FormNode):
    #: Document or identification number
    number_identification = StringProperty()
    #: Check digit of the identification number
    digit_verification = StringProperty()
    #: Name or registered business name
    name = StringProperty()
    #: Acronym
    initial = StringProperty()
    #: Legal representative
    legal_representative = RelationshipTo(VersionedNode, 'IS_IN', cardinality=ZeroOrMore)
    #: Telephone
    telephone = StringProperty()
    #: Address
    address = RelationshipTo(VersionedNode, 'IS_IN', cardinality=ZeroOrMore)
    #: E-mail
email = StringProperty()
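# Illustrative sketch (field values are hypothetical; construction and
# persistence semantics come from cid.core.forms.FormNode, not shown here):
#
#   company = Company(number_identification="900123456",
#                     digit_verification="7",
#                     name="ACME Ltda.",
#                     email="info@example.com")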
|
agpl-3.0
| -4,010,117,498,692,730,000
| 36.040816
| 89
| 0.698456
| false
| 4.085586
| false
| false
| false
|
jdotjdot/django-apptemplates
|
apptemplates/__init__.py
|
1
|
2709
|
"""
Django template loader that allows you to load a template from a specific
Django application.
"""
from os.path import dirname, join, abspath
from django.conf import settings
from django.template.base import Origin
from django.template.loaders.filesystem import Loader as FilesystemLoader
try:
from importlib import import_module # noqa pylint: disable=wrong-import-order,no-name-in-module
except ImportError: # Python < 2.7
from django.utils.importlib import import_module # noqa pylint: disable=no-name-in-module,import-error
import django
_cache = {}
def get_app_template_dir(app_name):
"""
Get the template directory for an application
We do not use django.db.models.get_app, because this will fail if an
app does not have any models.
Returns a full path, or None if the app was not found.
"""
if app_name in _cache:
return _cache[app_name]
template_dir = None
for app in settings.INSTALLED_APPS:
if app.split('.')[-1] == app_name:
# Do not hide import errors; these should never happen at this
# point anyway
mod = import_module(app)
template_dir = join(abspath(dirname(mod.__file__)), 'templates')
break
_cache[app_name] = template_dir
return template_dir
if django.VERSION[:2] >= (1, 9):
def get_template_path(template_dir, template_name, loader=None):
"""Return Origin object with template file path"""
return Origin(name=join(template_dir, template_name),
template_name=template_name,
loader=loader)
else:
def get_template_path(template_dir, template_name, loader=None):
"""Return template file path (for Django < 1.9)"""
_ = loader # noqa
return join(template_dir, template_name)
class Loader(FilesystemLoader):
"""
FilesystemLoader for templates of a Django app
"""
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Return the absolute paths to "template_name" in the specified app
If the name does not contain an app name (no colon), an empty list
is returned.
The parent FilesystemLoader.load_template_source() will take care
of the actual loading for us.
"""
if ':' not in template_name:
return []
app_name, template_name = template_name.split(":", 1)
template_dir = get_app_template_dir(app_name)
if template_dir:
return [get_template_path(template_dir, template_name, self)]
else:
return []
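# Illustrative configuration sketch. The settings key depends on the Django
# version (TEMPLATE_LOADERS before 1.8, TEMPLATES['OPTIONS']['loaders'] later);
# the "admin:" prefix below assumes django.contrib.admin is installed:
#
#   TEMPLATE_LOADERS = (
#       'apptemplates.Loader',
#       'django.template.loaders.filesystem.Loader',
#       'django.template.loaders.app_directories.Loader',
#   )
#
#   # In a template, prefix the template name with the app name and a colon:
#   #   {% extends "admin:admin/base.html" %}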
|
mit
| -5,958,066,998,723,027,000
| 34.12
| 107
| 0.627907
| false
| 4.104545
| false
| false
| false
|
browseinfo/odoo_saas3_nicolas
|
addons/website_quote/controllers/main.py
|
1
|
8818
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models import website
import werkzeug
import datetime
import time
from openerp.tools.translate import _
class sale_quote(http.Controller):
@http.route([
"/quote/<int:order_id>",
"/quote/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, token=None, message=False, **post):
# use SUPERUSER_ID allow to access/view order for public user
# only if he knows the private token
order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
now = time.strftime('%Y-%m-%d')
if token:
if token != order.access_token:
return request.website.render('website.404')
# Log only once a day
if request.httprequest.session.get('view_quote',False)!=now:
request.httprequest.session['view_quote'] = now
body=_('Quotation viewed by customer')
self.__message_post(body, order_id, type='comment')
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': max(days, 0)
}
return request.website.render('website_quote.so_quotation', values)
@http.route(['/quote/accept'], type='json', auth="public", website=True)
def accept(self, order_id=None, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_order_confirm(request.cr, SUPERUSER_ID, [order_id], context=request.context)
message = _('Order signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return True
@http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))
@http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
# use SUPERUSER_ID allow to access/view order for public user
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
if token != order.access_token:
return request.website.render('website.404')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token))
def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=attachments
)
request.session.body = False
return True
@http.route(['/quote/update_line'], type='json', auth="public", website=True)
def update(self, line_id=None, remove=False, unlink=False, order_id=None, token=None, **post):
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ('draft','sent'):
return False
line_id=int(line_id)
if unlink:
request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
return False
number=(remove and -1 or 1)
order_line_obj = request.registry.get('sale.order.line')
order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
quantity = order_line_val['product_uom_qty'] + number
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
return [str(quantity), str(order.amount_total)]
@http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True, multilang=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('website_quote.so_template', values)
@http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
def add(self, option_id, order_id, token, **post):
vals = {}
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
option_obj = request.registry.get('sale.order.option')
option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id,
False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id,
option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'),
False, order.fiscal_position.id, True, request.context)
vals = res.get('value', {})
if 'tax_id' in vals:
vals['tax_id'] = [(6, 0, vals['tax_id'])]
vals.update({
'price_unit': option.price_unit,
'website_description': option.website_description,
'name': option.name,
'order_id': order.id,
'product_id' : option.product_id.id,
'product_uos_qty': option.quantity,
'product_uos': option.uom_id.id,
'product_uom_qty': option.quantity,
'product_uom': option.uom_id.id,
'discount': option.discount,
})
line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
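# Summary of the routes exposed by this controller (taken from the @http.route
# decorators above; "42" and "TOKEN" are placeholder values):
#
#   /quote/42 and /quote/42/TOKEN       -> view()          render the quotation
#   /quote/accept (JSON)                -> accept()        confirm and sign the order
#   /quote/42/TOKEN/decline             -> decline()       cancel with an optional message
#   /quote/42/TOKEN/post                -> post()          add a customer comment
#   /quote/update_line (JSON)           -> update()        change an order line quantity
#   /quote/template/<quote>             -> template_view() preview a quote template
#   /quote/add_line/<option>/42/TOKEN   -> add()           add an optional line to the order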
|
agpl-3.0
| 2,026,008,436,371,959,300
| 50.567251
| 129
| 0.61658
| false
| 3.817316
| false
| false
| false
|
dgoodwin/rounder
|
src/rounder/ui/gtk/main.py
|
1
|
11733
|
# encoding=utf-8
#
# Rounder - Poker for the GNOME Desktop
#
# Copyright (C) 2008 Devan Goodwin <dgoodwin@dangerouslyinc.com>
# Copyright (C) 2008 James Bowes <jbowes@dangerouslyinc.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" The Rounder GTK Client """
import subprocess
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
from logging import getLogger
logger = getLogger("rounder.ui.gtk.main")
from twisted.internet import gtk2reactor
gtk2reactor.install()
from rounder.network.client import RounderNetworkClient
from rounder.network.serialize import register_message_classes
from rounder.ui.client import Client
from rounder.ui.gtk.util import find_file_on_path
from rounder.ui.gtk.table import TableWindow
ROUNDER_LOGO_FILE = "rounder/ui/gtk/data/rounder-logo.png"
ROUNDER_ICON_FILE = "rounder/ui/gtk/data/rounder-icon.svg"
def connect(host, port, username, password, app):
# Attempt to connect to the specified server by creating a client
# object. If successful pass the client back to the main application,
# otherwise display an error status message and let the user try
# again:
client = RounderNetworkClient(app)
try:
client.connect(host, port, username, password)
except Exception, e:
logger.error("Unable to login to %s as %s" % (host, username))
class RounderGtk(Client):
"""
The Rounder GTK Client
Represents the main Rounder interface to connect to a server, view
available tables, and join them. (opening a separate window)
"""
def __init__(self, host=None, port=None, username=None, password=None):
logger.info("Starting rounder.")
logger.debug("Initial connection Info:\n"
" host = %s\n"
" port = %s\n"
" username = %s\n"
" password = %s", host, port, username, password)
register_message_classes()
glade_file = 'rounder/ui/gtk/data/rounder.glade'
self.glade_xml = gtk.glade.XML(find_file_on_path(glade_file))
main_window = self.glade_xml.get_widget('main-window')
main_window.set_icon_from_file(find_file_on_path(ROUNDER_ICON_FILE))
self.table_list = self.glade_xml.get_widget('table-list')
self.statusbar = self.glade_xml.get_widget('statusbar')
self.connect_button = self.glade_xml.get_widget('connect-button')
logo = self.glade_xml.get_widget("rounder-logo-image")
logo.set_from_file(find_file_on_path(ROUNDER_LOGO_FILE))
signals = {
'on_connect_activate': self.show_connect_dialog,
'on_close_activate': self.shutdown,
'on_main_window_destroy': self.shutdown,
'on_connect_button_clicked': self.show_connect_dialog,
'on_quit_button_clicked': self.shutdown,
'on_table_list_row_activated': self.open_table,
'on_about1_activate': self.open_about_window,
}
self.glade_xml.signal_autoconnect(signals)
treeselection = self.table_list.get_selection()
treeselection.set_mode(gtk.SELECTION_SINGLE)
# Reference to a network client.
self.client = None
self.connect_dialog = None # Set once connect dialog is open
self.set_status("Connect to a server to begin playing.")
main_window.show_all()
# Autoconnect if given details, otherwise show connect dialog:
if host != None and port != None and username != None and \
password != None:
connect(host, port, username, password, self)
else:
self.show_connect_dialog(None)
def main(self):
""" Launch the GTK main loop. """
gtk.main()
def shutdown(self, widget):
""" Closes the application. """
if self.client != None:
self.client.shutdown()
logger.info("Stopping application.")
gtk.main_quit()
def open_table(self, treeview, row, column):
"""
Open a table window.
Connected to the table list and called when the user selected a table
to join.
"""
logger.info("Opening table window")
model = treeview.get_model()
logger.debug("row clicked: %s\n"
"table id: %s\n"
"table name: %s", row[0], model[row][0], model[row][1])
self.client.open_table(model[row][0])
def open_table_success(self, client_table):
table_win = TableWindow(self, client_table)
def show_connect_dialog(self, widget):
""" Opens the connect to server dialog. """
if self.connect_dialog == None:
self.connect_dialog = ConnectDialog(self)
else:
logger.debug("Connect dialog already open.")
def connect_success(self, client):
"""
Callback used by the connect dialog after a connection to a server
has been successfully made.
"""
logger.info("Connected to %s:%s as %s" % (client.host, client.port,
client.username))
self.client = client
# Call also sets our reference to None:
if self.connect_dialog != None:
self.connect_dialog.destroy(None, None, None)
self.connect_button.set_sensitive(False)
self.set_status("Connected to server: %s" % client.host)
server_label = self.glade_xml.get_widget('server-label')
server_label.set_text(client.host)
username_label = self.glade_xml.get_widget('username-label')
username_label.set_text(client.username)
self.client.get_table_list()
def connect_failure(self):
""" Connection failed callback. """
logger.warn("Connect failed")
self.connect_dialog.set_status("Login failed.")
def list_tables_success(self, table_listings):
"""
Populate the list of tables in the main server window.
GTK TreeView's aren't fun but this works in conjunction with the
__cell_* methods to populate the columns.
"""
logger.debug("Populating table list")
column_names = ["Table ID", "Name", "Limit", "Players"]
cell_data_funcs = [self.__cell_table_id, self.__cell_table,
self.__cell_limit, self.__cell_players]
tables = gtk.ListStore(int, str, str, str)
for table in table_listings:
tables.append([table.id, table.name, table.limit,
table.player_count])
columns = [None] * len(column_names)
# Populate the table columns and cells:
for n in range(0, len(column_names)):
cell = gtk.CellRendererText()
columns[n] = gtk.TreeViewColumn(column_names[n], cell)
columns[n].set_cell_data_func(cell, cell_data_funcs[n])
self.table_list.append_column(columns[n])
self.table_list.set_model(tables)
@staticmethod
def _open_url(dialog, url, data):
subprocess.call(['xdg-open', url])
@staticmethod
def _open_email(dialog, email, data):
subprocess.call(['xdg-email', email])
def open_about_window(self, menuitem):
gtk.about_dialog_set_url_hook(self._open_url, None)
gtk.about_dialog_set_email_hook(self._open_email, None)
about = gtk.AboutDialog()
about.set_name("Rounder")
about.set_version("0.0.1")
about.set_copyright("Copyright © 2008 Devan Goodwin & James Bowes")
about.set_comments("Poker for the GNOME Desktop")
# XXX Put the full license in here
about.set_license("GPLv2")
about.set_website("http://dangerouslyinc.com")
about.set_website_label("http://dangerouslyinc.com")
about.set_authors(('Devan Goodwin <dgoodwin@dangerouslyinc.com>',
'James Bowes <jbowes@dangerouslyinc.com>',
'Kenny MacDermid <kenny@kmdconsulting.ca>'))
about.set_artists(('Anette Goodwin <anette.goodwin@gmail.com>',
'James Bowes <jbowes@dangerouslyinc.com>'))
about.set_logo(gtk.gdk.pixbuf_new_from_file(
find_file_on_path(ROUNDER_LOGO_FILE)))
about.set_icon_from_file(find_file_on_path(ROUNDER_ICON_FILE))
about.connect('response', lambda x, y: about.destroy())
about.show_all()
def __cell_table_id(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 0))
def __cell_table(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 1))
def __cell_limit(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 2))
def __cell_players(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 3))
def set_status(self, message):
""" Display a message in the main window's status bar. """
self.statusbar.push(self.statusbar.get_context_id("Rounder"), message)
self.statusbar.show()
class ConnectDialog(object):
""" Dialog for connecting to a server. """
def __init__(self, app):
logger.debug("Opening connect dialog.")
self.app = app
glade_file = 'rounder/ui/gtk/data/connect.glade'
self.glade_xml = gtk.glade.XML(find_file_on_path(glade_file))
self.connect_dialog = self.glade_xml.get_widget('connect-dialog')
self.connect_dialog.set_icon_from_file(
find_file_on_path(ROUNDER_ICON_FILE))
signals = {
'on_connect_button_clicked': self.connect,
}
self.glade_xml.signal_autoconnect(signals)
self.connect_dialog.connect("delete_event", self.destroy)
self.connect_dialog.show_all()
def connect(self, widget):
""" Attempt to open a connection to the host and port specified. """
host_entry = self.glade_xml.get_widget('host-entry')
host = host_entry.get_text()
port_spinbutton = self.glade_xml.get_widget('port-spinbutton')
port = port_spinbutton.get_value_as_int()
username_entry = self.glade_xml.get_widget('username-entry')
username = username_entry.get_text()
password_entry = self.glade_xml.get_widget('password-entry')
password = password_entry.get_text()
logger.debug("Connecting to %s on port %s"
"\n as: %s / %s", host, port, username, password)
connect(host, port, username, password, self.app)
def set_status(self, message):
""" Display a message in the connect dialog's status bar. """
statusbar = self.glade_xml.get_widget('statusbar')
statusbar.push(statusbar.get_context_id("Connect Dialog"), message)
statusbar.show()
def destroy(self, widget, event, data=None):
"""
Called by main Rounder application who receives the success callback
from the network client.
"""
logger.debug("Closing connect dialog.")
self.app.connect_dialog = None
self.connect_dialog.destroy()
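# Illustrative launch sketch (host, port and credentials are hypothetical;
# passing None for all four opens the connect dialog instead):
#
#   client = RounderGtk(host="localhost", port=35100,
#                       username="player", password="secret")
#   client.main()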
|
gpl-2.0
| -6,898,350,797,561,719,000
| 36.244444
| 78
| 0.629304
| false
| 3.733927
| false
| false
| false
|
rflamary/POT
|
ot/stochastic.py
|
1
|
24589
|
"""
Stochastic solvers for regularized OT.
"""
# Author: Kilian Fatras <kilian.fatras@gmail.com>
#
# License: MIT License
import numpy as np
##############################################################################
# Optimization toolbox for SEMI - DUAL problems
##############################################################################
def coordinate_grad_semi_dual(b, M, reg, beta, i):
r'''
Compute the coordinate gradient update for regularized discrete distributions for (i, :)
The function computes the gradient of the semi dual problem:
.. math::
\max_v \sum_i (\sum_j v_j * b_j - reg * log(\sum_j exp((v_j - M_{i,j})/reg) * b_j)) * a_i
Where :
- M is the (ns,nt) metric cost matrix
- v is a dual variable in R^J
- reg is the regularization term
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the ASGD & SAG algorithms
as proposed in [18]_ [alg.1 & alg.2]
Parameters
----------
b : ndarray, shape (nt,)
Target measure.
M : ndarray, shape (ns, nt)
Cost matrix.
reg : float
Regularization term > 0.
v : ndarray, shape (nt,)
Dual variable.
i : int
Picked number i.
Returns
-------
coordinate gradient : ndarray, shape (nt,)
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000)
array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06],
[1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03],
[3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07],
[2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04],
[9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01],
[2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01],
[4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]])
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527.
'''
r = M[i, :] - beta
exp_beta = np.exp(-r / reg) * b
khi = exp_beta / (np.sum(exp_beta))
return b - khi
def sag_entropic_transport(a, b, M, reg, numItermax=10000, lr=None):
r'''
Compute the SAG algorithm to solve the regularized discrete measures
optimal transport max problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1 = b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the SAG algorithm
as proposed in [18]_ [alg.1]
Parameters
----------
a : ndarray, shape (ns,),
Source measure.
b : ndarray, shape (nt,),
Target measure.
M : ndarray, shape (ns, nt),
Cost matrix.
reg : float
Regularization term > 0
numItermax : int
Number of iteration.
lr : float
Learning rate.
Returns
-------
v : ndarray, shape (nt,)
Dual variable.
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000)
array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06],
[1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03],
[3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07],
[2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04],
[9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01],
[2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01],
[4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]])
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527.
'''
if lr is None:
lr = 1. / max(a / reg)
n_source = np.shape(M)[0]
n_target = np.shape(M)[1]
cur_beta = np.zeros(n_target)
stored_gradient = np.zeros((n_source, n_target))
sum_stored_gradient = np.zeros(n_target)
for _ in range(numItermax):
i = np.random.randint(n_source)
cur_coord_grad = a[i] * coordinate_grad_semi_dual(b, M, reg,
cur_beta, i)
sum_stored_gradient += (cur_coord_grad - stored_gradient[i])
stored_gradient[i] = cur_coord_grad
cur_beta += lr * (1. / n_source) * sum_stored_gradient
return cur_beta
def averaged_sgd_entropic_transport(a, b, M, reg, numItermax=300000, lr=None):
r'''
    Compute the ASGD algorithm to solve the regularized semi-continuous measures optimal transport max problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the ASGD algorithm
as proposed in [18]_ [alg.2]
Parameters
----------
b : ndarray, shape (nt,)
target measure
M : ndarray, shape (ns, nt)
cost matrix
reg : float
Regularization term > 0
numItermax : int
Number of iteration.
lr : float
Learning rate.
Returns
-------
ave_v : ndarray, shape (nt,)
dual variable
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000)
array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06],
[1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03],
[3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07],
[2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04],
[9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01],
[2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01],
[4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]])
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527.
'''
if lr is None:
lr = 1. / max(a / reg)
n_source = np.shape(M)[0]
n_target = np.shape(M)[1]
cur_beta = np.zeros(n_target)
ave_beta = np.zeros(n_target)
for cur_iter in range(numItermax):
k = cur_iter + 1
i = np.random.randint(n_source)
cur_coord_grad = coordinate_grad_semi_dual(b, M, reg, cur_beta, i)
cur_beta += (lr / np.sqrt(k)) * cur_coord_grad
ave_beta = (1. / k) * cur_beta + (1 - 1. / k) * ave_beta
return ave_beta
def c_transform_entropic(b, M, reg, beta):
r'''
The goal is to recover u from the c-transform.
The function computes the c_transform of a dual variable from the other
dual variable:
.. math::
        u_i = v^{c,reg}_i = -reg \log(\sum_j exp((v_j - M_{i,j})/reg) b_j)
Where :
- M is the (ns,nt) metric cost matrix
- u, v are dual variables in R^IxR^J
- reg is the regularization term
It is used to recover an optimal u from optimal v solving the semi dual
problem, see Proposition 2.1 of [18]_
Parameters
----------
b : ndarray, shape (nt,)
Target measure
M : ndarray, shape (ns, nt)
Cost matrix
reg : float
Regularization term > 0
v : ndarray, shape (nt,)
Dual variable.
Returns
-------
u : ndarray, shape (ns,)
Dual variable.
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000)
array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06],
[1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03],
[3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07],
[2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04],
[9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01],
[2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01],
[4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]])
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527.
'''
n_source = np.shape(M)[0]
alpha = np.zeros(n_source)
for i in range(n_source):
r = M[i, :] - beta
min_r = np.min(r)
exp_beta = np.exp(-(r - min_r) / reg) * b
alpha[i] = min_r - reg * np.log(np.sum(exp_beta))
return alpha
def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None,
log=False):
r'''
Compute the transportation matrix to solve the regularized discrete
measures optimal transport max problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the SAG or ASGD algorithms
as proposed in [18]_
Parameters
----------
a : ndarray, shape (ns,)
source measure
b : ndarray, shape (nt,)
target measure
M : ndarray, shape (ns, nt)
cost matrix
reg : float
Regularization term > 0
    method : str
used method (SAG or ASGD)
numItermax : int
number of iteration
lr : float
learning rate
n_source : int
size of the source measure
n_target : int
size of the target measure
log : bool, optional
record log if True
Returns
-------
pi : ndarray, shape (ns, nt)
transportation matrix
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> ot.stochastic.solve_semi_dual_entropic(a, b, M, reg=1, method="ASGD", numItermax=300000)
array([[2.53942342e-02, 9.98640673e-02, 1.75945647e-02, 4.27664307e-06],
[1.21556999e-01, 1.26350515e-02, 1.30491795e-03, 7.36017394e-03],
[3.54070702e-03, 7.63581358e-02, 6.29581672e-02, 1.32812798e-07],
[2.60578198e-02, 3.35916645e-02, 8.28023223e-02, 4.05336238e-04],
[9.86808864e-03, 7.59774324e-04, 1.08702729e-02, 1.21359007e-01],
[2.17218856e-02, 9.12931802e-04, 1.87962526e-03, 1.18342700e-01],
[4.14237512e-02, 2.67487857e-02, 7.23016955e-02, 2.38291052e-03]])
References
----------
[Genevay et al., 2016] :
Stochastic Optimization for Large-scale Optimal Transport,
Advances in Neural Information Processing Systems (2016),
arXiv preprint arxiv:1605.08527.
'''
if method.lower() == "sag":
opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr)
elif method.lower() == "asgd":
opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr)
else:
print("Please, select your method between SAG and ASGD")
return None
opt_alpha = c_transform_entropic(b, M, reg, opt_beta)
pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) *
a[:, None] * b[None, :])
if log:
log = {}
log['alpha'] = opt_alpha
log['beta'] = opt_beta
return pi, log
else:
return pi
##############################################################################
# Optimization toolbox for DUAL problems
##############################################################################
def batch_grad_dual(a, b, M, reg, alpha, beta, batch_size, batch_alpha,
batch_beta):
r'''
Computes the partial gradient of the dual optimal transport problem.
For each (i,j) in a batch of coordinates, the partial gradients are :
.. math::
\partial_{u_i} F = u_i * b_s/l_{v} - \sum_{j \in B_v} exp((u_i + v_j - M_{i,j})/reg) * a_i * b_j
\partial_{v_j} F = v_j * b_s/l_{u} - \sum_{i \in B_u} exp((u_i + v_j - M_{i,j})/reg) * a_i * b_j
Where :
- M is the (ns,nt) metric cost matrix
- u, v are dual variables in R^ixR^J
- reg is the regularization term
- :math:`B_u` and :math:`B_v` are lists of index
    - :math:`b_s` is the size of the batches :math:`B_u` and :math:`B_v`
    - :math:`l_u` and :math:`l_v` are the lengths of :math:`B_u` and :math:`B_v`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the dual problem is the SGD algorithm
as proposed in [19]_ [alg.1]
Parameters
----------
a : ndarray, shape (ns,)
source measure
b : ndarray, shape (nt,)
target measure
M : ndarray, shape (ns, nt)
cost matrix
reg : float
Regularization term > 0
alpha : ndarray, shape (ns,)
dual variable
beta : ndarray, shape (nt,)
dual variable
batch_size : int
size of the batch
batch_alpha : ndarray, shape (bs,)
batch of index of alpha
batch_beta : ndarray, shape (bs,)
batch of index of beta
Returns
-------
grad : ndarray, shape (ns,)
partial grad F
Examples
--------
>>> import ot
>>> np.random.seed(0)
>>> n_source = 7
>>> n_target = 4
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> X_source = np.random.randn(n_source, 2)
>>> Y_target = np.random.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg=1, batch_size=3, numItermax=30000, lr=0.1, log=True)
>>> log['alpha']
array([0.71759102, 1.57057384, 0.85576566, 0.1208211 , 0.59190466,
1.197148 , 0.17805133])
>>> log['beta']
array([0.49741367, 0.57478564, 1.40075528, 2.75890102])
>>> sgd_dual_pi
array([[2.09730063e-02, 8.38169324e-02, 7.50365455e-03, 8.72731415e-09],
[5.58432437e-03, 5.89881299e-04, 3.09558411e-05, 8.35469849e-07],
[3.26489515e-03, 7.15536035e-02, 2.99778211e-02, 3.02601593e-10],
[4.05390622e-02, 5.31085068e-02, 6.65191787e-02, 1.55812785e-06],
[7.82299812e-02, 6.12099102e-03, 4.44989098e-02, 2.37719187e-03],
[5.06266486e-02, 2.16230494e-03, 2.26215141e-03, 6.81514609e-04],
[6.06713990e-02, 3.98139808e-02, 5.46829338e-02, 8.62371424e-06]])
References
----------
[Seguy et al., 2018] :
International Conference on Learning Representation (2018),
arXiv preprint arxiv:1711.02283.
'''
G = - (np.exp((alpha[batch_alpha, None] + beta[None, batch_beta] -
M[batch_alpha, :][:, batch_beta]) / reg) *
a[batch_alpha, None] * b[None, batch_beta])
grad_beta = np.zeros(np.shape(M)[1])
grad_alpha = np.zeros(np.shape(M)[0])
grad_beta[batch_beta] = (b[batch_beta] * len(batch_alpha) / np.shape(M)[0]
+ G.sum(0))
grad_alpha[batch_alpha] = (a[batch_alpha] * len(batch_beta)
/ np.shape(M)[1] + G.sum(1))
return grad_alpha, grad_beta
def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr):
r'''
Compute the sgd algorithm to solve the regularized discrete measures
optimal transport dual problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
Parameters
----------
a : ndarray, shape (ns,)
source measure
b : ndarray, shape (nt,)
target measure
M : ndarray, shape (ns, nt)
cost matrix
reg : float
Regularization term > 0
batch_size : int
size of the batch
numItermax : int
number of iteration
lr : float
learning rate
Returns
-------
alpha : ndarray, shape (ns,)
dual variable
beta : ndarray, shape (nt,)
dual variable
Examples
--------
>>> import ot
>>> n_source = 7
>>> n_target = 4
>>> reg = 1
>>> numItermax = 20000
>>> lr = 0.1
>>> batch_size = 3
>>> log = True
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> rng = np.random.RandomState(0)
>>> X_source = rng.randn(n_source, 2)
>>> Y_target = rng.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log)
>>> log['alpha']
array([0.64171798, 1.27932201, 0.78132257, 0.15638935, 0.54888354,
1.03663469, 0.20595781])
>>> log['beta']
array([0.51207194, 0.58033189, 1.28922676, 2.26859736])
>>> sgd_dual_pi
array([[1.97276541e-02, 7.81248547e-02, 6.22136048e-03, 4.95442423e-09],
[4.23494310e-03, 4.43286263e-04, 2.06927079e-05, 3.82389139e-07],
[3.07542414e-03, 6.67897769e-02, 2.48904999e-02, 1.72030247e-10],
[4.26271990e-02, 5.53375455e-02, 6.16535024e-02, 9.88812650e-07],
[7.60423265e-02, 5.89585256e-03, 3.81267087e-02, 1.39458256e-03],
[4.37557504e-02, 1.85189176e-03, 1.72335760e-03, 3.55491279e-04],
[6.33096109e-02, 4.11683954e-02, 5.02962051e-02, 5.43097516e-06]])
References
----------
[Seguy et al., 2018] :
International Conference on Learning Representation (2018),
arXiv preprint arxiv:1711.02283.
'''
n_source = np.shape(M)[0]
n_target = np.shape(M)[1]
cur_alpha = np.zeros(n_source)
cur_beta = np.zeros(n_target)
for cur_iter in range(numItermax):
k = np.sqrt(cur_iter + 1)
batch_alpha = np.random.choice(n_source, batch_size, replace=False)
batch_beta = np.random.choice(n_target, batch_size, replace=False)
update_alpha, update_beta = batch_grad_dual(a, b, M, reg, cur_alpha,
cur_beta, batch_size,
batch_alpha, batch_beta)
cur_alpha[batch_alpha] += (lr / k) * update_alpha[batch_alpha]
cur_beta[batch_beta] += (lr / k) * update_beta[batch_beta]
return cur_alpha, cur_beta
def solve_dual_entropic(a, b, M, reg, batch_size, numItermax=10000, lr=1,
log=False):
r'''
Compute the transportation matrix to solve the regularized discrete measures
optimal transport dual problem
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma \geq 0
Where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
Parameters
----------
a : ndarray, shape (ns,)
source measure
b : ndarray, shape (nt,)
target measure
M : ndarray, shape (ns, nt)
cost matrix
reg : float
Regularization term > 0
batch_size : int
size of the batch
numItermax : int
number of iteration
lr : float
learning rate
log : bool, optional
record log if True
Returns
-------
pi : ndarray, shape (ns, nt)
transportation matrix
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> n_source = 7
>>> n_target = 4
>>> reg = 1
>>> numItermax = 20000
>>> lr = 0.1
>>> batch_size = 3
>>> log = True
>>> a = ot.utils.unif(n_source)
>>> b = ot.utils.unif(n_target)
>>> rng = np.random.RandomState(0)
>>> X_source = rng.randn(n_source, 2)
>>> Y_target = rng.randn(n_target, 2)
>>> M = ot.dist(X_source, Y_target)
>>> sgd_dual_pi, log = ot.stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log)
>>> log['alpha']
array([0.64057733, 1.2683513 , 0.75610161, 0.16024284, 0.54926534,
1.0514201 , 0.19958936])
>>> log['beta']
array([0.51372571, 0.58843489, 1.27993921, 2.24344807])
>>> sgd_dual_pi
array([[1.97377795e-02, 7.86706853e-02, 6.15682001e-03, 4.82586997e-09],
[4.19566963e-03, 4.42016865e-04, 2.02777272e-05, 3.68823708e-07],
[3.00379244e-03, 6.56562018e-02, 2.40462171e-02, 1.63579656e-10],
[4.28626062e-02, 5.60031599e-02, 6.13193826e-02, 9.67977735e-07],
[7.61972739e-02, 5.94609051e-03, 3.77886693e-02, 1.36046648e-03],
[4.44810042e-02, 1.89476742e-03, 1.73285847e-03, 3.51826036e-04],
[6.30118293e-02, 4.12398660e-02, 4.95148998e-02, 5.26247246e-06]])
References
----------
[Seguy et al., 2018] :
International Conference on Learning Representation (2018),
arXiv preprint arxiv:1711.02283.
'''
opt_alpha, opt_beta = sgd_entropic_regularization(a, b, M, reg, batch_size,
numItermax, lr)
pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) *
a[:, None] * b[None, :])
if log:
log = {}
log['alpha'] = opt_alpha
log['beta'] = opt_beta
return pi, log
else:
return pi
|
mit
| -1,453,941,329,542,792,700
| 31.568212
| 126
| 0.563057
| false
| 2.958609
| false
| false
| false
|
jmikkola/Lexington
|
src/lexington/__init__.py
|
1
|
3175
|
import collections
from werkzeug.wrappers import Response
from lexington.util import di
from lexington.util import route
from lexington.util import view_map
from lexington.util import paths
def default_dependencies(settings):
dependencies = di.Dependencies()
dependencies.register_value('settings', settings)
dependencies.register_value('respond', Response)
dependencies.register_late_bound_value('environ')
paths.register_all(dependencies)
return dependencies
def app():
"""
Helper function to construct the application factory(!)
"""
settings = {}
dependencies = default_dependencies(settings)
views = view_map.ViewMapFactory()
routes = route.Routes()
return ApplicationFactory(settings, dependencies, views, routes)
class ApplicationFactory:
def __init__(self, settings, dependencies, views, routes):
self._settings = settings
self._dependencies = dependencies
self._views = views
self._routes = routes
def add_route(self, route_name, method, path_description):
self._routes.add_route(route_name, method, path_description)
def add_view_fn(self, route_name, fn, dependencies=None):
if dependencies is None:
dependencies = []
view = view_map.View(fn, route_name, dependencies)
self.add_view(view)
def add_view(self, view):
self._views.add_view(view)
def add_value(self, name, value):
self._dependencies.register_value(name, value)
def add_factory(self, name, factory_fn, dependencies=None):
self._dependencies.register_factory(name, factory_fn, dependencies)
def create_app(self):
self._dependencies.check_dependencies()
routing = self._routes.get_routing()
view_map = self._views.create(
routing.get_names(),
self._dependencies.provided_dependencies()
)
return Application(self._dependencies, view_map, routing)
class Application:
def __init__(self, dependencies, view_map, routing):
self._dependencies = dependencies
self._view_map = view_map
self._routing = routing
def __call__(self, environ, start_response):
response = self._get_response(environ)
return response(environ, start_response)
def _get_response(self, environ):
injector = self._dependencies.build_injector(late_bound_values={
'environ': environ,
})
method = injector.get_dependency('method')
path = injector.get_dependency('path')
route_name, segment_matches = self._routing.path_to_route(path, method)
if route_name is None:
return self._404('Route not found')
view = self._view_map.get_view(route_name)
if view is None:
return self._404('No view found for route ' + route_name)
result = injector.inject(view.fn, view.dependencies)
if isinstance(result, Response):
return result
else: # Assume that the result is text
return Response(result, mimetype='text/plain')
def _404(self, message):
return Response(message, status=404)
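# Illustrative usage sketch (the route path and view function are hypothetical;
# create_app() returns a standard WSGI callable):
#
#   factory = app()
#   factory.add_route('hello', 'GET', '/hello')
#   factory.add_view_fn('hello', lambda: 'Hello, world!')
#   application = factory.create_app()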
|
mit
| -8,198,006,270,251,723,000
| 32.421053
| 79
| 0.656693
| false
| 4.233333
| false
| false
| false
|
kevin-intel/scikit-learn
|
sklearn/feature_extraction/text.py
|
2
|
70670
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import array
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
import numbers
from operator import itemgetter
import re
import unicodedata
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import normalize
from ._hash import FeatureHasher
from ._stop_words import ENGLISH_STOP_WORDS
from ..utils.validation import check_is_fitted, check_array, FLOAT_DTYPES
from ..utils import _IS_32BIT
from ..utils.fixes import _astype_copy_false
from ..exceptions import NotFittedError
__all__ = ['HashingVectorizer',
'CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
        Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(doc, analyzer=None, tokenizer=None, ngrams=None,
preprocessor=None, decoder=None, stop_words=None):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : string
The string to strip
See Also
--------
strip_accents_ascii : Remove accentuated char for any unicode symbol that
has a direct ASCII equivalent.
"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize('NFKD', s)
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : string
The string to strip
See Also
--------
strip_accents_unicode : Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
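# A minimal check of the two accent strippers defined above; the sample string
# is arbitrary. Both yield plain ASCII here, but only the unicode variant also
# handles characters that lack a direct ASCII transliteration.
from sklearn.feature_extraction.text import (strip_accents_ascii,
                                              strip_accents_unicode)

sample = "Jochen Wersdörfer"
print(strip_accents_unicode(sample))   # 'Jochen Wersdorfer'
print(strip_accents_ascii(sample))     # 'Jochen Wersdorfer'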
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
Parameters
----------
s : string
The string to strip
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
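# Quick illustration of strip_tags on a toy HTML fragment: every tag is
# replaced by a single space, so surrounding whitespace may double up.
from sklearn.feature_extraction.text import strip_tags

print(strip_tags("<p>Hello <b>world</b>!</p>"))   # ' Hello  world ! '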
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixin:
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
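# Standalone sketch of the word n-gram expansion implemented above, written
# outside the class with a fixed ngram_range; illustrative only, not part of
# the sklearn API.
def _word_ngrams_sketch(tokens, ngram_range=(1, 2)):
    min_n, max_n = ngram_range
    output = list(tokens) if min_n == 1 else []
    for n in range(max(min_n, 2), max_n + 1):
        for i in range(len(tokens) - n + 1):
            output.append(" ".join(tokens[i:i + n]))
    return output

print(_word_ngrams_sketch(["text", "feature", "extraction"]))
# ['text', 'feature', 'extraction', 'text feature', 'feature extraction']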
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
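# The 'char_wb' behaviour described above, checked through the public
# CountVectorizer API on a toy string: n-grams never cross the space between
# words, and short words are padded with spaces.
from sklearn.feature_extraction.text import CountVectorizer

analyze = CountVectorizer(analyzer='char_wb', ngram_range=(3, 3)).build_analyzer()
print(analyze("hi you"))   # [' hi', 'hi ', ' yo', 'you', 'ou ']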
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
return partial(
_preprocess, accent_function=strip_accents, lower=self.lowercase
)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError(
"More than 1 capturing group in token pattern. Only a single "
"group should be captured."
)
return token_pattern.findall
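# Behaviour of the default token_pattern used by build_tokenizer above:
# runs of two or more word characters are kept, while punctuation and single
# characters are dropped. Toy sentence.
from sklearn.feature_extraction.text import CountVectorizer

tokenize = CountVectorizer().build_tokenizer()
print(tokenize("Don't count 1-letter tokens, e.g. 'a'."))
# ['Don', 'count', 'letter', 'tokens']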
def get_stop_words(self):
"""Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
"""
if id(self.stop_words) == getattr(self, '_stop_words_id', None):
            # Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn('Your stop_words may be inconsistent with '
'your preprocessing. Tokenizing the stop '
'words generated tokens %r not in '
'stop_words.' % sorted(inconsistent))
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return 'error'
def build_analyzer(self):
"""Return a callable that handles preprocessing, tokenization
and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(
_analyze, analyzer=self.analyzer, decoder=self.decode
)
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return partial(_analyze, ngrams=self._char_ngrams,
preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'char_wb':
return partial(_analyze, ngrams=self._char_wb_ngrams,
preprocessor=preprocess, decoder=self.decode)
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess,
tokenize)
return partial(_analyze, ngrams=self._word_ngrams,
tokenizer=tokenize, preprocessor=preprocess,
decoder=self.decode, stop_words=stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in range(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
if not self.fixed_vocabulary_:
raise NotFittedError("Vocabulary not fitted or provided")
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
def _validate_params(self):
"""Check validity of ngram_range parameter"""
min_n, max_m = self.ngram_range
if min_n > max_m:
raise ValueError(
"Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(self.ngram_range))
def _warn_for_unused_params(self):
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn("The parameter 'token_pattern' will not be used"
" since 'tokenizer' is not None'")
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn("The parameter 'preprocessor' will not be used"
" since 'analyzer' is callable'")
if (self.ngram_range != (1, 1) and self.ngram_range is not None
and callable(self.analyzer)):
warnings.warn("The parameter 'ngram_range' will not be used"
" since 'analyzer' is callable'")
if self.analyzer != 'word' or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn("The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'")
if self.token_pattern is not None and \
self.token_pattern != r"(?u)\b\w\w+\b":
warnings.warn("The parameter 'token_pattern' will not be used"
" since 'analyzer' != 'word'")
if self.tokenizer is not None:
warnings.warn("The parameter 'tokenizer' will not be used"
" since 'analyzer' != 'word'")
class HashingVectorizer(TransformerMixin, _VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer is not callable``.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer is not callable``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionadded:: 0.19
dtype : type, default=np.float64
Type of the matrix returned by fit_transform() or transform().
Examples
--------
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = HashingVectorizer(n_features=2**4)
>>> X = vectorizer.fit_transform(corpus)
>>> print(X.shape)
(4, 16)
See Also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, *, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', alternate_sign=True,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
"""
# triggers a parameter validation
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._warn_for_unused_params()
self._validate_params()
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_params()
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def fit_transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
alternate_sign=self.alternate_sign)
def _more_tags(self):
return {'X_types': ['string']}
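# Sketch of the streaming property emphasised in the HashingVectorizer
# docstring: the transformer is stateless, so separate batches can be hashed
# independently and still share the same feature space. Toy documents.
from sklearn.feature_extraction.text import HashingVectorizer

hv = HashingVectorizer(n_features=2 ** 10)
batch_1 = hv.transform(['first batch of documents'])
batch_2 = hv.transform(['a later batch, processed separately'])
assert batch_1.shape[1] == batch_2.shape[1] == 2 ** 10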
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
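# Small numeric check of the CSR branch above: document frequency is the
# number of rows in which each column is non-zero. The counts are made up.
import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[1, 0],
                            [2, 0],
                            [0, 3]]))
print(np.bincount(X.indices, minlength=X.shape[1]))   # [2 1]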
class CountVectorizer(_VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (strip_accents and lowercase) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer is not callable``.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
Only applies if ``analyzer is not callable``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``filename`` or ``file``, the data is
first read from the file and then passed to the given callable
analyzer.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, default=np.int64
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
Examples
--------
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> print(vectorizer.get_feature_names())
['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']
>>> print(X.toarray())
[[0 1 1 1 0 0 1 0 1]
[0 2 0 1 0 1 1 0 1]
[1 0 0 1 1 0 1 1 1]
[0 1 1 1 0 0 1 0 1]]
>>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2))
>>> X2 = vectorizer2.fit_transform(corpus)
>>> print(vectorizer2.get_feature_names())
['and this', 'document is', 'first document', 'is the', 'is this',
'second document', 'the first', 'the second', 'the third', 'third one',
'this document', 'this is', 'this the']
>>> print(X2.toarray())
[[0 0 1 1 0 0 1 0 0 0 0 1 0]
[0 1 0 1 0 1 0 1 0 0 1 0 0]
[1 0 0 1 0 0 0 0 1 1 0 1 0]
[0 0 1 0 1 0 1 0 0 0 0 0 1]]
See Also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, *, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(vocabulary.items())
map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
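# The pruning above is what the public max_df / min_df / max_features
# parameters drive; a quick demonstration on a toy corpus where 'apple'
# appears in every document and is therefore dropped by max_df=2.
from sklearn.feature_extraction.text import CountVectorizer

corpus = ['apple banana', 'apple cherry', 'apple banana cherry']
cv = CountVectorizer(max_df=2, min_df=2).fit(corpus)
print(sorted(cv.vocabulary_))   # ['banana', 'cherry']
print(cv.stop_words_)           # {'apple'}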
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = []
if self.lowercase:
for vocab in vocabulary:
if any(map(str.isupper, vocab)):
warnings.warn("Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents")
break
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
if _IS_32BIT:
raise ValueError(('sparse CSR array has {} non-zero '
'elements and requires 64 bit indexing, '
'which is unsupported with 32 bit Python.')
.format(indptr[-1]))
indices_dtype = np.int64
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self._warn_for_unused_params()
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array of shape (n_samples, n_features)
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_params()
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to fewer documents than min_df")
if max_features is not None:
X = self._sort_features(X, vocabulary)
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
if max_features is None:
X = self._sort_features(X, vocabulary)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_inv : list of arrays of shape (n_samples,)
List of arrays of terms.
"""
self._check_vocabulary()
# We need CSR format for fast row manipulations.
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
if sp.issparse(X):
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
else:
return [inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name.
Returns
-------
feature_names : list
A list of feature names.
"""
self._check_vocabulary()
return [t for t, i in sorted(self.vocabulary_.items(),
key=itemgetter(1))]
def _more_tags(self):
return {'X_types': ['string']}
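# Round trip through CountVectorizer.fit_transform and inverse_transform
# defined above: the terms present in each document are recovered from the
# count matrix (within-document order is not preserved). Toy corpus.
from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer()
X = cv.fit_transform(['red green', 'green blue'])
print([sorted(map(str, terms)) for terms in cv.inverse_transform(X)])
# [['green', 'red'], ['blue', 'green']]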
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(TransformerMixin, BaseEstimator):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf for a term t of a document d
in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
n is the total number of documents in the document set and df(t) is the
document frequency of t; the document frequency is the number of documents
in the document set that contain the term t. The effect of adding "1" to
the idf in the equation above is that terms with zero idf, i.e., terms
that occur in all documents in a training set, will not be entirely
ignored.
(Note that the idf formula above differs from the standard textbook
notation that defines the idf as
idf(t) = log [ n / (df(t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : {'l1', 'l2'}, default='l2'
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
* 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`
use_idf : bool, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array of shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 1.0
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.pipeline import Pipeline
>>> import numpy as np
>>> corpus = ['this is the first document',
... 'this document is the second document',
... 'and this is the third one',
... 'is this the first document']
>>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the',
... 'and', 'one']
>>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
... ('tfid', TfidfTransformer())]).fit(corpus)
>>> pipe['count'].transform(corpus).toarray()
array([[1, 1, 1, 1, 0, 1, 0, 0],
[1, 2, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 0, 0]])
>>> pipe['tfid'].idf_
array([1. , 1.22314355, 1.51082562, 1. , 1.91629073,
1. , 1.91629073, 1.91629073])
>>> pipe.transform(corpus).shape
(4, 8)
References
----------
.. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.
.. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.
"""
def __init__(self, *, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights).
Parameters
----------
        X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
"""
X = self._validate_data(X, accept_sparse=('csr', 'csc'))
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
df = df.astype(dtype, **_astype_copy_false(df))
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(n_samples / df) + 1
self._idf_diag = sp.diags(idf, offsets=0,
shape=(n_features, n_features),
format='csr',
dtype=dtype)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix of (n_samples, n_features)
a matrix of term/token counts
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
"""
X = self._validate_data(X, accept_sparse='csr',
dtype=FLOAT_DTYPES, copy=copy, reset=False)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
# idf_ being a property, the automatic attributes detection
# does not work as usual and we need to specify the attribute
# name:
check_is_fitted(self, attributes=["idf_"],
msg='idf vector is not fitted')
            # in-place ``*=`` is not supported for this sparse product,
            # so reassign the result of the explicit multiplication
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
self._idf_diag = sp.spdiags(value, diags=0, m=n_features,
n=n_features, format='csr')
def _more_tags(self):
return {'X_types': 'sparse'}
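# Numeric check of the smoothed idf formula quoted in the TfidfTransformer
# docstring, idf(t) = ln((1 + n) / (1 + df(t))) + 1, on made-up counts where
# both features occur in 2 of the 3 documents.
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

counts = np.array([[3, 0],
                   [1, 1],
                   [0, 2]])
idf = TfidfTransformer(smooth_idf=True).fit(counts).idf_
expected = np.log((1 + 3) / (1 + np.array([2, 2]))) + 1
print(np.allclose(idf, expected))   # True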
class TfidfVectorizer(CountVectorizer):
r"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to :class:`CountVectorizer` followed by
:class:`TfidfTransformer`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'}, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer is not callable``.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
stop_words : {'english'}, list, default=None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer is not callable``.
max_df : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : bool, default=False
        If True, all non-zero term counts are set to 1. This does not mean
        outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set ``use_idf`` to False and ``norm`` to None to get
        0/1 outputs.)
dtype : dtype, default=float64
Type of the matrix returned by fit_transform() or transform().
norm : {'l1', 'l2'}, default='l2'
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
* 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
idf_ : array of shape (n_features,)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfTransformer : Performs the TF-IDF transformation from a provided
matrix of counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> print(vectorizer.get_feature_names())
['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']
>>> print(X.shape)
(4, 9)
"""
def __init__(self, *, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.float64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super().__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
@idf_.setter
def idf_(self, value):
self._validate_vocabulary()
if hasattr(self, 'vocabulary_'):
if len(self.vocabulary_) != len(value):
raise ValueError("idf length = %d must be equal "
"to vocabulary size = %d" %
                                 (len(value), len(self.vocabulary_)))
self._tfidf.idf_ = value
def _check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn("Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64."
.format(FLOAT_DTYPES, self.dtype),
UserWarning)
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg='The TF-IDF vectorizer is not fitted')
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
def _more_tags(self):
return {'X_types': ['string'], '_skip_test': True}
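# Check of the equivalence stated in the TfidfVectorizer docstring: it gives
# the same result as CountVectorizer followed by TfidfTransformer with the
# default parameters. Toy corpus.
import numpy as np
from sklearn.feature_extraction.text import (CountVectorizer,
                                              TfidfTransformer,
                                              TfidfVectorizer)

corpus = ['this is the first document', 'this is the second document']
direct = TfidfVectorizer().fit_transform(corpus)
two_step = TfidfTransformer().fit_transform(CountVectorizer().fit_transform(corpus))
print(np.allclose(direct.toarray(), two_step.toarray()))   # True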
|
bsd-3-clause
| 227,654,248,893,633,020
| 36.609367
| 79
| 0.595432
| false
| 4.421171
| false
| false
| false
|
graik/biskit
|
archive_biskit2/Biskit/Dock/settings.py
|
1
|
3174
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Settings
========
This module provides Dock-global settings as fields. Throughout
Biskit.Dock a (environment-dependent) parameter such as, e.g., ssh_bin
can be addressed as:
>>> import Biskit.Dock.settings as S
>>> bin = S.ssh_bin
However, since a user should not be required to hack python modules,
ssh_bin is not actually defined in settings.py. Instead, the value is
taken from C{~/.biskit/settings_Dock.cfg} -- which should have an entry
like C{ssh_bin=/bin/ssh # comment}. If this entry (or the config file)
is not found, settings.py uses the default value from
C{biskit/Biskit/data/defaults/settings_Dock.cfg}.
If missing, the user configuration file C{~/.biskit/settings_Dock.cfg} is
created automatically during the startup of Biskit (i.e. for any
import). The auto-generated file only contains parameters for which
the default values don't seem to work (invalid paths or binaries).
See L{Biskit.SettingsManager}
Summary for Biskit users
------------------------
If you want to change a biskit parameter, do so in
C{~/.biskit/settings_Dock.cfg}
Summary for Biskit developers
------------------------------
If you want to create a new user-adjustable parameter, do so in
C{biskit/Biskit/data/defaults/settings_Dock.cfg}.
Summary for all
---------------
!Don't touch C{settings.py}!
"""
import Biskit as B
import Biskit.tools as T
import Biskit.SettingsManager as M
import user, sys
__CFG_DEFAULT = T.dataRoot() + '/defaults/settings_Dock.cfg'
__CFG_USER = user.home + '/.biskit/settings_Dock.cfg'
try:
m = M.SettingsManager(__CFG_DEFAULT, __CFG_USER, createmissing=True )
m.updateNamespace( locals() )
except Exception, why:
B.EHandler.fatal( 'Error importing Biskit.Dock settings')
##############################
## Check environment variables
env = {}
hex_env = {'HEX_ROOT':'/home/Bis/johan/APPLICATIONS/HEX',
'HEX_CACHE':'/home/Bis/johan/APPLICATIONS/HEX/hex_cache',
'HEX_VERSION':'4b'}
prosaII_env = {'PROSA_BASE':'/home/Bis/shared/rh73/prosa/prosabase/'}
env.update(hex_env)
env.update(prosaII_env)
######################
## clean up name space
del B, T, M, user, sys
del __CFG_DEFAULT, __CFG_USER, m
################
## empty test ##
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Mock test, settings is always executed anyway."""
pass
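# Not Biskit code: a minimal sketch of the "packaged defaults overridden by a
# user config file" pattern described in the module docstring above, written
# with configparser. The file names are placeholders.
import os
from configparser import ConfigParser

def load_settings(defaults='settings_Dock_defaults.cfg',
                  user_cfg=os.path.expanduser('~/.biskit/settings_Dock.cfg')):
    cfg = ConfigParser()
    cfg.read(defaults)    # values shipped with the package
    cfg.read(user_cfg)    # user values, if the file exists, take precedence
    return cfg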
|
gpl-3.0
| -4,584,866,479,027,789,300
| 29.815534
| 74
| 0.694707
| false
| 3.412903
| false
| false
| false
|
hologram-io/hologram-python
|
Hologram/Authentication/CSRPSKAuthentication.py
|
1
|
2419
|
# CSRPSKAuthentication.py - Hologram Python SDK CSRPSKAuthentication interface
#
# Author: Hologram <support@hologram.io>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
# This CSRPSKAuthentication file implements the CSRPSK authentication interface.
#
# LICENSE: Distributed under the terms of the MIT License
#
import json
from Exceptions.HologramError import AuthenticationError
from Hologram.Authentication.HologramAuthentication import HologramAuthentication
DEVICE_KEY_LEN = 8
class CSRPSKAuthentication(HologramAuthentication):
def __init__(self, credentials):
self._data = {}
super().__init__(credentials=credentials)
def buildPayloadString(self, messages, topics=None, modem_type=None,
modem_id=None, version=None):
self.enforceValidDeviceKey()
super().buildPayloadString(messages,
topics=topics,
modem_type=modem_type,
modem_id=modem_id,
version=version)
payload = json.dumps(self._data) + "\r\r"
return payload.encode()
def buildSMSPayloadString(self, destination_number, message):
self.enforceValidDeviceKey()
send_data = 'S' + self.credentials['devicekey']
send_data += destination_number + ' ' + message
send_data += "\r\r"
return send_data.encode()
def buildAuthString(self, timestamp=None, sequence_number=None):
self._data['k'] = self.credentials['devicekey']
def buildMetadataString(self, modem_type, modem_id, version):
formatted_string = f"{self.build_modem_type_id_str(modem_type, modem_id)}-{version}"
self._data['m'] = self.metadata_version.decode() + formatted_string
def buildTopicString(self, topics):
self._data['t'] = topics
def buildMessageString(self, messages):
self._data['d'] = messages
def enforceValidDeviceKey(self):
if not isinstance(self.credentials, dict):
raise AuthenticationError('Credentials is not a dictionary')
elif not self.credentials['devicekey']:
raise AuthenticationError('Must set devicekey to use CSRPSKAuthentication')
elif len(self.credentials['devicekey']) != DEVICE_KEY_LEN:
raise AuthenticationError('Device key must be %d characters long' % DEVICE_KEY_LEN)
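if __name__ == '__main__':
    # Hedged usage sketch, not part of the SDK: the 8-character device key below
    # is a made-up placeholder, and this assumes HologramAuthentication.__init__()
    # simply stores the credentials dict on self.credentials.
    auth = CSRPSKAuthentication({'devicekey': 'ABCD1234'})
    auth.buildAuthString()                    # stores the device key under 'k'
    auth.buildTopicString(['TOPIC1'])         # stores topics under 't'
    auth.buildMessageString(['hello world'])  # stores messages under 'd'
    print(json.dumps(auth._data))             # roughly the body that buildPayloadString() terminates with "\r\r"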
|
mit
| 5,625,506,043,620,905,000
| 35.104478
| 95
| 0.649442
| false
| 4.192374
| false
| false
| false
|
dspmeng/code
|
scripts/GopBitRate.py
|
1
|
1764
|
import csv
import itertools
import numpy as np
import getopt
from os import path
from os import system
from sys import argv
try:
opts, args = getopt.getopt(argv[1:], 'f:')
except getopt.GetoptError:
    print 'GopBitRate.py -f <frame rate> <video file>'
print opts
print args
frameRate = 30
for opt, arg in opts:
if opt == '-f':
frameRate = int(arg)
else:
        print 'unknown opt: ', opt
print 'frame rate: %d' % frameRate
esFile = args[0]
stats = path.splitext(path.basename(esFile))[0] + '.csv'
system('ffprobe -of csv -show_frames ' + esFile + '>' + stats)
frameType = ''
count = 0
totalFrames = 0
totalSize = 0
gopFrames = []
gopSize = []
size = 0
with open(stats, 'r') as f:
reader = csv.reader(f)
for row in itertools.islice(reader, 0, None):
try:
count = int(row[-5])
frameType = row[-6]
totalFrames += 1
size = int(row[13])
totalSize += size
if frameType == 'I':
gopFrames.append(0)
gopSize.append(0)
gopFrames[-1] += 1
gopSize[-1] += size
except Exception, e:
print str(e) + ': %s' % row
totalSize *= 8
print 'Total size (%d frames): %d bits' % (totalFrames, totalSize)
print 'Average bitrate: %f Mbps' % (totalSize * frameRate / totalFrames / 1000000.0)
normGopSize = map(lambda x,y:x/y*frameRate*8/1000000.0, gopSize, gopFrames)
maxGopSize = max(normGopSize)
maxGop = normGopSize.index(maxGopSize)
print 'Maximum bitrate(Gop#%d): %f Mbps' % (maxGop, normGopSize[maxGop])
for i in np.argsort(normGopSize)[::-1][:10]:
print 'GOP#%3d(%4d): %8d bytes, %6d frames -> %f Mbps' % (i, sum(gopFrames[0:i+1]) - gopFrames[i], gopSize[i], gopFrames[i], normGopSize[i])
|
apache-2.0
| 3,685,222,563,568,351,000
| 27
| 144
| 0.607143
| false
| 2.935108
| false
| false
| false
|
iandees/all-the-places
|
locations/spiders/hihostels.py
|
1
|
1934
|
# -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
class HiHostelsSpider(scrapy.Spider):
name = "hihostels"
allowed_domains = ['hihostels.com']
start_urls = (
'https://www.hihostels.com/sitemap.xml',
)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
regex = re.compile(r'http\S+hihostels.com/\S+/hostels/\S+')
for path in city_urls:
            if re.search(regex, path):
                yield scrapy.Request(
                    path.strip(),
                    callback=self.parse_store,
                )
def parse_store(self, response):
properties = {
'name': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/h1/span/text()').extract()[0].split()),
'ref': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/h1/span/text()').extract()[0].split()),
'addr_full': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[0].split()),
'city': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[1].split()),
'postcode': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[-2].split()),
'country': " ".join(response.xpath('/html/body/div[1]/div[6]/div[2]/div[1]/div[2]/p[1]/text()').extract()[0].split(',')[-1].split()),
'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
'lon': float(response.xpath('//*[@id ="lon"]/@value').extract()[0]),
'lat': float(response.xpath('//*[@id ="lat"]/@value').extract()[0]),
}
yield GeojsonPointItem(**properties)
|
mit
| 2,543,737,011,314,564,600
| 45.047619
| 146
| 0.542399
| false
| 3.196694
| false
| false
| false
|
senttech/Cura
|
cura/Settings/MachineManager.py
|
1
|
50213
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal
from PyQt5.QtWidgets import QMessageBox
from UM.Application import Application
from UM.Preferences import Preferences
from UM.Logger import Logger
from UM.Message import Message
from UM.Settings.SettingRelation import RelationType
import UM.Settings
from cura.PrinterOutputDevice import PrinterOutputDevice
from . import ExtruderManager
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
import time
import os
class MachineManager(QObject):
def __init__(self, parent = None):
super().__init__(parent)
self._active_container_stack = None
self._global_container_stack = None
Application.getInstance().globalContainerStackChanged.connect(self._onGlobalContainerChanged)
## When the global container is changed, active material probably needs to be updated.
self.globalContainerChanged.connect(self.activeMaterialChanged)
self.globalContainerChanged.connect(self.activeVariantChanged)
self.globalContainerChanged.connect(self.activeQualityChanged)
self._active_stack_valid = None
self._onGlobalContainerChanged()
ExtruderManager.getInstance().activeExtruderChanged.connect(self._onActiveExtruderStackChanged)
self._onActiveExtruderStackChanged()
ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeMaterialChanged)
ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeVariantChanged)
ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeQualityChanged)
self.globalContainerChanged.connect(self.activeStackChanged)
self.globalValueChanged.connect(self.activeStackChanged)
ExtruderManager.getInstance().activeExtruderChanged.connect(self.activeStackChanged)
self._empty_variant_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_variant")[0]
self._empty_material_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_material")[0]
self._empty_quality_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality")[0]
self._empty_quality_changes_container = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality_changes")[0]
Preferences.getInstance().addPreference("cura/active_machine", "")
self._global_event_keys = set()
active_machine_id = Preferences.getInstance().getValue("cura/active_machine")
self._printer_output_devices = []
Application.getInstance().getOutputDeviceManager().outputDevicesChanged.connect(self._onOutputDevicesChanged)
if active_machine_id != "":
# An active machine was saved, so restore it.
self.setActiveMachine(active_machine_id)
if self._global_container_stack and self._global_container_stack.getProperty("machine_extruder_count", "value") > 1:
# Make sure _active_container_stack is properly initiated
ExtruderManager.getInstance().setActiveExtruderIndex(0)
self._auto_materials_changed = {}
self._auto_hotends_changed = {}
globalContainerChanged = pyqtSignal()
activeMaterialChanged = pyqtSignal()
activeVariantChanged = pyqtSignal()
activeQualityChanged = pyqtSignal()
activeStackChanged = pyqtSignal()
globalValueChanged = pyqtSignal() # Emitted whenever a value inside global container is changed.
activeValidationChanged = pyqtSignal() # Emitted whenever a validation inside active container is changed
blurSettings = pyqtSignal() # Emitted to force fields in the advanced sidebar to un-focus, so they update properly
outputDevicesChanged = pyqtSignal()
def _onOutputDevicesChanged(self):
for printer_output_device in self._printer_output_devices:
printer_output_device.hotendIdChanged.disconnect(self._onHotendIdChanged)
printer_output_device.materialIdChanged.disconnect(self._onMaterialIdChanged)
self._printer_output_devices.clear()
for printer_output_device in Application.getInstance().getOutputDeviceManager().getOutputDevices():
if isinstance(printer_output_device, PrinterOutputDevice):
self._printer_output_devices.append(printer_output_device)
printer_output_device.hotendIdChanged.connect(self._onHotendIdChanged)
printer_output_device.materialIdChanged.connect(self._onMaterialIdChanged)
self.outputDevicesChanged.emit()
@pyqtProperty("QVariantList", notify = outputDevicesChanged)
def printerOutputDevices(self):
return self._printer_output_devices
def _onHotendIdChanged(self, index, hotend_id):
if not self._global_container_stack:
return
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type="variant", definition=self._global_container_stack.getBottom().getId(), name=hotend_id)
if containers: # New material ID is known
extruder_manager = ExtruderManager.getInstance()
extruders = list(extruder_manager.getMachineExtruders(self.activeMachineId))
matching_extruder = None
for extruder in extruders:
if str(index) == extruder.getMetaDataEntry("position"):
matching_extruder = extruder
break
if matching_extruder and matching_extruder.findContainer({"type": "variant"}).getName() != hotend_id:
# Save the material that needs to be changed. Multiple changes will be handled by the callback.
self._auto_hotends_changed[str(index)] = containers[0].getId()
self._printer_output_devices[0].materialHotendChangedMessage(self._materialHotendChangedCallback)
else:
Logger.log("w", "No variant found for printer definition %s with id %s" % (self._global_container_stack.getBottom().getId(), hotend_id))
def _autoUpdateHotends(self):
extruder_manager = ExtruderManager.getInstance()
for position in self._auto_hotends_changed:
hotend_id = self._auto_hotends_changed[position]
old_index = extruder_manager.activeExtruderIndex
if old_index != int(position):
extruder_manager.setActiveExtruderIndex(int(position))
else:
old_index = None
Logger.log("d", "Setting hotend variant of hotend %s to %s" % (position, hotend_id))
self.setActiveVariant(hotend_id)
if old_index is not None:
extruder_manager.setActiveExtruderIndex(old_index)
def _onMaterialIdChanged(self, index, material_id):
if not self._global_container_stack:
return
definition_id = "fdmprinter"
if self._global_container_stack.getMetaDataEntry("has_machine_materials", False):
definition_id = self._global_container_stack.getBottom().getId()
extruder_manager = ExtruderManager.getInstance()
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "material", definition = definition_id, GUID = material_id)
if containers: # New material ID is known
extruders = list(extruder_manager.getMachineExtruders(self.activeMachineId))
matching_extruder = None
for extruder in extruders:
if str(index) == extruder.getMetaDataEntry("position"):
matching_extruder = extruder
break
if matching_extruder and matching_extruder.findContainer({"type":"material"}).getMetaDataEntry("GUID") != material_id:
# Save the material that needs to be changed. Multiple changes will be handled by the callback.
self._auto_materials_changed[str(index)] = containers[0].getId()
self._printer_output_devices[0].materialHotendChangedMessage(self._materialHotendChangedCallback)
else:
Logger.log("w", "No material definition found for printer definition %s and GUID %s" % (definition_id, material_id))
def _materialHotendChangedCallback(self, button):
if button == QMessageBox.No:
self._auto_materials_changed = {}
self._auto_hotends_changed = {}
return
self._autoUpdateMaterials()
self._autoUpdateHotends()
def _autoUpdateMaterials(self):
extruder_manager = ExtruderManager.getInstance()
for position in self._auto_materials_changed:
material_id = self._auto_materials_changed[position]
old_index = extruder_manager.activeExtruderIndex
if old_index != int(position):
extruder_manager.setActiveExtruderIndex(int(position))
else:
old_index = None
Logger.log("d", "Setting material of hotend %s to %s" % (position, material_id))
self.setActiveMaterial(material_id)
if old_index is not None:
extruder_manager.setActiveExtruderIndex(old_index)
def _onGlobalContainerChanged(self):
if self._global_container_stack:
self._global_container_stack.nameChanged.disconnect(self._onMachineNameChanged)
self._global_container_stack.containersChanged.disconnect(self._onInstanceContainersChanged)
self._global_container_stack.propertyChanged.disconnect(self._onPropertyChanged)
material = self._global_container_stack.findContainer({"type": "material"})
material.nameChanged.disconnect(self._onMaterialNameChanged)
quality = self._global_container_stack.findContainer({"type": "quality"})
quality.nameChanged.disconnect(self._onQualityNameChanged)
self._global_container_stack = Application.getInstance().getGlobalContainerStack()
self._active_container_stack = self._global_container_stack
self.globalContainerChanged.emit()
if self._global_container_stack:
Preferences.getInstance().setValue("cura/active_machine", self._global_container_stack.getId())
self._global_container_stack.nameChanged.connect(self._onMachineNameChanged)
self._global_container_stack.containersChanged.connect(self._onInstanceContainersChanged)
self._global_container_stack.propertyChanged.connect(self._onPropertyChanged)
material = self._global_container_stack.findContainer({"type": "material"})
material.nameChanged.connect(self._onMaterialNameChanged)
quality = self._global_container_stack.findContainer({"type": "quality"})
quality.nameChanged.connect(self._onQualityNameChanged)
def _onActiveExtruderStackChanged(self):
self.blurSettings.emit() # Ensure no-one has focus.
if self._active_container_stack and self._active_container_stack != self._global_container_stack:
self._active_container_stack.containersChanged.disconnect(self._onInstanceContainersChanged)
self._active_container_stack.propertyChanged.disconnect(self._onPropertyChanged)
self._active_container_stack = ExtruderManager.getInstance().getActiveExtruderStack()
if self._active_container_stack:
self._active_container_stack.containersChanged.connect(self._onInstanceContainersChanged)
self._active_container_stack.propertyChanged.connect(self._onPropertyChanged)
else:
self._active_container_stack = self._global_container_stack
self._active_stack_valid = not self._checkStackForErrors(self._active_container_stack)
self.activeValidationChanged.emit()
def _onInstanceContainersChanged(self, container):
container_type = container.getMetaDataEntry("type")
if container_type == "material":
self.activeMaterialChanged.emit()
elif container_type == "variant":
self.activeVariantChanged.emit()
elif container_type == "quality":
self.activeQualityChanged.emit()
def _onPropertyChanged(self, key, property_name):
if property_name == "value":
            # If a setting is not settable per extruder but has "enabled" relations whose targets are
            # settable per extruder, copy its value from the global stack to the extruder stacks so
            # that the front-end displays the right settings.
if not self._active_container_stack.getProperty(key, "settable_per_extruder"):
relations = self._global_container_stack.getBottom()._getDefinition(key).relations
for relation in filter(lambda r: r.role == "enabled" and r.type == RelationType.RequiredByTarget, relations):
# Target setting is settable per extruder
if self._active_container_stack.getProperty(relation.target.key, "settable_per_extruder"):
new_value = self._global_container_stack.getProperty(key, "value")
stacks = [stack for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId())]
for extruder_stack in stacks:
if extruder_stack.getProperty(key, "value") != new_value:
extruder_stack.getTop().setProperty(key, "value", new_value)
break
if property_name == "validationState":
if self._active_stack_valid:
if self._active_container_stack.getProperty(key, "settable_per_extruder"):
changed_validation_state = self._active_container_stack.getProperty(key, property_name)
else:
changed_validation_state = self._global_container_stack.getProperty(key, property_name)
if changed_validation_state in (UM.Settings.ValidatorState.Exception, UM.Settings.ValidatorState.MaximumError, UM.Settings.ValidatorState.MinimumError):
self._active_stack_valid = False
self.activeValidationChanged.emit()
else:
if not self._checkStackForErrors(self._active_container_stack) and not self._checkStackForErrors(self._global_container_stack):
self._active_stack_valid = True
self.activeValidationChanged.emit()
self.activeStackChanged.emit()
@pyqtSlot(str)
def setActiveMachine(self, stack_id):
containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id = stack_id)
if containers:
Application.getInstance().setGlobalContainerStack(containers[0])
@pyqtSlot(str, str)
def addMachine(self, name, definition_id):
container_registry = UM.Settings.ContainerRegistry.getInstance()
definitions = container_registry.findDefinitionContainers(id = definition_id)
if definitions:
definition = definitions[0]
name = self._createUniqueName("machine", "", name, definition.getName())
new_global_stack = UM.Settings.ContainerStack(name)
new_global_stack.addMetaDataEntry("type", "machine")
container_registry.addContainer(new_global_stack)
variant_instance_container = self._updateVariantContainer(definition)
material_instance_container = self._updateMaterialContainer(definition, variant_instance_container)
quality_instance_container = self._updateQualityContainer(definition, variant_instance_container, material_instance_container)
current_settings_instance_container = UM.Settings.InstanceContainer(name + "_current_settings")
current_settings_instance_container.addMetaDataEntry("machine", name)
current_settings_instance_container.addMetaDataEntry("type", "user")
current_settings_instance_container.setDefinition(definitions[0])
container_registry.addContainer(current_settings_instance_container)
new_global_stack.addContainer(definition)
if variant_instance_container:
new_global_stack.addContainer(variant_instance_container)
if material_instance_container:
new_global_stack.addContainer(material_instance_container)
if quality_instance_container:
new_global_stack.addContainer(quality_instance_container)
new_global_stack.addContainer(self._empty_quality_changes_container)
new_global_stack.addContainer(current_settings_instance_container)
ExtruderManager.getInstance().addMachineExtruders(definition, new_global_stack.getId())
Application.getInstance().setGlobalContainerStack(new_global_stack)
## Create a name that is not empty and unique
# \param container_type \type{string} Type of the container (machine, quality, ...)
# \param current_name \type{} Current name of the container, which may be an acceptable option
# \param new_name \type{string} Base name, which may not be unique
# \param fallback_name \type{string} Name to use when (stripped) new_name is empty
# \return \type{string} Name that is unique for the specified type and name/id
def _createUniqueName(self, container_type, current_name, new_name, fallback_name):
return UM.Settings.ContainerRegistry.getInstance().createUniqueName(container_type, current_name, new_name, fallback_name)
## Convenience function to check if a stack has errors.
def _checkStackForErrors(self, stack):
if stack is None:
return False
for key in stack.getAllKeys():
validation_state = stack.getProperty(key, "validationState")
if validation_state in (UM.Settings.ValidatorState.Exception, UM.Settings.ValidatorState.MaximumError, UM.Settings.ValidatorState.MinimumError):
return True
return False
## Remove all instances from the top instanceContainer (effectively removing all user-changed settings)
@pyqtSlot()
def clearUserSettings(self):
if not self._active_container_stack:
return
self.blurSettings.emit()
user_settings = self._active_container_stack.getTop()
user_settings.clear()
## Check if the global_container has instances in the user container
@pyqtProperty(bool, notify = activeStackChanged)
def hasUserSettings(self):
if not self._global_container_stack:
return False
if self._global_container_stack.getTop().findInstances():
return True
for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId()):
if stack.getTop().findInstances():
return True
return False
## Delete a user setting from the global stack and all extruder stacks.
# \param key \type{str} the name of the key to delete
@pyqtSlot(str)
def clearUserSettingAllCurrentStacks(self, key):
if not self._global_container_stack:
return
self._global_container_stack.getTop().removeInstance(key)
for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId()):
stack.getTop().removeInstance(key)
## Check if the global profile does not contain error states
# Note that the _active_stack_valid is cached due to performance issues
# Calling _checkStackForErrors on every change is simply too expensive
@pyqtProperty(bool, notify = activeValidationChanged)
def isActiveStackValid(self):
return bool(self._active_stack_valid)
@pyqtProperty(str, notify = activeStackChanged)
def activeUserProfileId(self):
if self._active_container_stack:
return self._active_container_stack.getTop().getId()
return ""
@pyqtProperty(str, notify = globalContainerChanged)
def activeMachineName(self):
if self._global_container_stack:
return self._global_container_stack.getName()
return ""
@pyqtProperty(str, notify = globalContainerChanged)
def activeMachineId(self):
if self._global_container_stack:
return self._global_container_stack.getId()
return ""
@pyqtProperty(str, notify = activeStackChanged)
def activeStackId(self):
if self._active_container_stack:
return self._active_container_stack.getId()
return ""
@pyqtProperty(str, notify = activeMaterialChanged)
def activeMaterialName(self):
if self._active_container_stack:
material = self._active_container_stack.findContainer({"type":"material"})
if material:
return material.getName()
return ""
@pyqtProperty(str, notify=activeMaterialChanged)
def activeMaterialId(self):
if self._active_container_stack:
material = self._active_container_stack.findContainer({"type": "material"})
if material:
return material.getId()
return ""
@pyqtProperty("QVariantMap", notify = activeMaterialChanged)
def allActiveMaterialIds(self):
if not self._global_container_stack:
return {}
result = {}
for stack in ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks():
material_container = stack.findContainer(type = "material")
if not material_container:
continue
result[stack.getId()] = material_container.getId()
return result
## Get the Material ID associated with the currently active material
# \returns MaterialID (string) if found, empty string otherwise
@pyqtProperty(str, notify=activeQualityChanged)
def activeQualityMaterialId(self):
if self._active_container_stack:
quality = self._active_container_stack.findContainer({"type": "quality"})
if quality:
material_id = quality.getMetaDataEntry("material")
if material_id:
# if the currently active machine inherits its qualities from a different machine
# definition, make sure to return a material that is relevant to that machine definition
definition_id = self.activeDefinitionId
quality_definition_id = self.activeQualityDefinitionId
if definition_id != quality_definition_id:
material_id = material_id.replace(definition_id, quality_definition_id, 1)
return material_id
return ""
@pyqtProperty(str, notify=activeQualityChanged)
def activeQualityName(self):
if self._active_container_stack:
quality = self._active_container_stack.findContainer({"type": "quality_changes"})
if quality and quality != self._empty_quality_changes_container:
return quality.getName()
quality = self._active_container_stack.findContainer({"type": "quality"})
if quality:
return quality.getName()
return ""
@pyqtProperty(str, notify=activeQualityChanged)
def activeQualityId(self):
if self._global_container_stack:
quality = self._global_container_stack.findContainer({"type": "quality_changes"})
if quality and quality != self._empty_quality_changes_container:
return quality.getId()
quality = self._global_container_stack.findContainer({"type": "quality"})
if quality:
return quality.getId()
return ""
@pyqtProperty(str, notify = activeQualityChanged)
def activeQualityType(self):
if self._global_container_stack:
quality = self._global_container_stack.findContainer(type = "quality")
if quality:
return quality.getMetaDataEntry("quality_type")
return ""
@pyqtProperty(str, notify = activeQualityChanged)
def activeQualityChangesId(self):
if self._global_container_stack:
changes = self._global_container_stack.findContainer(type = "quality_changes")
if changes:
return changes.getId()
return ""
## Check if a container is read_only
@pyqtSlot(str, result = bool)
def isReadOnly(self, container_id):
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = container_id)
if not containers or not self._active_container_stack:
return True
return containers[0].isReadOnly()
## Copy the value of the setting of the current extruder to all other extruders as well as the global container.
@pyqtSlot(str)
def copyValueToExtruders(self, key):
if not self._active_container_stack or self._global_container_stack.getProperty("machine_extruder_count", "value") <= 1:
return
new_value = self._active_container_stack.getProperty(key, "value")
stacks = [stack for stack in ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId())]
stacks.append(self._global_container_stack)
for extruder_stack in stacks:
if extruder_stack != self._active_container_stack and extruder_stack.getProperty(key, "value") != new_value:
extruder_stack.getTop().setProperty(key, "value", new_value)
@pyqtSlot(str)
def setActiveMaterial(self, material_id):
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = material_id)
if not containers or not self._active_container_stack:
return
Logger.log("d", "Attempting to change the active material to %s", material_id)
old_variant = self._active_container_stack.findContainer({"type": "variant"})
old_material = self._active_container_stack.findContainer({"type": "material"})
old_quality = self._active_container_stack.findContainer({"type": "quality"})
old_quality_changes = self._active_container_stack.findContainer({"type": "quality_changes"})
if not old_material:
Logger.log("w", "While trying to set the active material, no material was found to replace it.")
return
if old_quality_changes.getId() == "empty_quality_changes": #Don't want the empty one.
old_quality_changes = None
self.blurSettings.emit()
old_material.nameChanged.disconnect(self._onMaterialNameChanged)
material_index = self._active_container_stack.getContainerIndex(old_material)
self._active_container_stack.replaceContainer(material_index, containers[0])
containers[0].nameChanged.connect(self._onMaterialNameChanged)
if containers[0].getMetaDataEntry("compatible") == False:
message = Message(catalog.i18nc("@info:status",
"The selected material is imcompatible with the selected machine or configuration."))
message.show()
if old_quality:
if old_quality_changes:
new_quality = self._updateQualityChangesContainer(old_quality.getMetaDataEntry("quality_type"), old_quality_changes.getMetaDataEntry("name"))
else:
new_quality = self._updateQualityContainer(self._global_container_stack.getBottom(), old_variant, containers[0], old_quality.getName())
else:
new_quality = self._updateQualityContainer(self._global_container_stack.getBottom(), old_variant, containers[0])
self.setActiveQuality(new_quality.getId())
@pyqtSlot(str)
def setActiveVariant(self, variant_id):
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = variant_id)
if not containers or not self._active_container_stack:
return
Logger.log("d", "Attempting to change the active variant to %s", variant_id)
old_variant = self._active_container_stack.findContainer({"type": "variant"})
old_material = self._active_container_stack.findContainer({"type": "material"})
if old_variant:
self.blurSettings.emit()
variant_index = self._active_container_stack.getContainerIndex(old_variant)
self._active_container_stack.replaceContainer(variant_index, containers[0])
preferred_material = None
if old_material:
preferred_material_name = old_material.getName()
self.setActiveMaterial(self._updateMaterialContainer(self._global_container_stack.getBottom(), containers[0], preferred_material_name).id)
else:
Logger.log("w", "While trying to set the active variant, no variant was found to replace.")
@pyqtSlot(str)
def setActiveQuality(self, quality_id):
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(id = quality_id)
if not containers or not self._global_container_stack:
return
Logger.log("d", "Attempting to change the active quality to %s", quality_id)
self.blurSettings.emit()
quality_container = None
quality_changes_container = self._empty_quality_changes_container
container_type = containers[0].getMetaDataEntry("type")
if container_type == "quality":
quality_container = containers[0]
elif container_type == "quality_changes":
quality_changes_container = containers[0]
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(
quality_type = quality_changes_container.getMetaDataEntry("quality"))
if not containers:
Logger.log("e", "Could not find quality %s for changes %s, not changing quality", quality_changes_container.getMetaDataEntry("quality"), quality_changes_container.getId())
return
quality_container = containers[0]
else:
Logger.log("e", "Tried to set quality to a container that is not of the right type")
return
quality_type = quality_container.getMetaDataEntry("quality_type")
if not quality_type:
quality_type = quality_changes_container.getName()
for stack in ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks():
extruder_id = stack.getId() if stack != self._global_container_stack else None
criteria = { "quality_type": quality_type, "extruder": extruder_id }
material = stack.findContainer(type = "material")
if material and material is not self._empty_material_container:
criteria["material"] = material.getId()
if self._global_container_stack.getMetaDataEntry("has_machine_quality"):
criteria["definition"] = self.activeQualityDefinitionId
else:
criteria["definition"] = "fdmprinter"
stack_quality = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**criteria)
if not stack_quality:
criteria.pop("extruder")
stack_quality = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**criteria)
if not stack_quality:
stack_quality = quality_container
else:
stack_quality = stack_quality[0]
else:
stack_quality = stack_quality[0]
if quality_changes_container != self._empty_quality_changes_container:
stack_quality_changes = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(name = quality_changes_container.getName(), extruder = extruder_id)[0]
else:
stack_quality_changes = self._empty_quality_changes_container
old_quality = stack.findContainer(type = "quality")
if old_quality:
old_quality.nameChanged.disconnect(self._onQualityNameChanged)
else:
Logger.log("w", "Could not find old quality while changing active quality.")
old_changes = stack.findContainer(type = "quality_changes")
if old_changes:
old_changes.nameChanged.disconnect(self._onQualityNameChanged)
else:
Logger.log("w", "Could not find old quality_changes while changing active quality.")
stack.replaceContainer(stack.getContainerIndex(old_quality), stack_quality)
stack.replaceContainer(stack.getContainerIndex(old_changes), stack_quality_changes)
stack_quality.nameChanged.connect(self._onQualityNameChanged)
stack_quality_changes.nameChanged.connect(self._onQualityNameChanged)
if self.hasUserSettings and Preferences.getInstance().getValue("cura/active_mode") == 1:
# Ask the user if the user profile should be cleared or not (discarding the current settings)
# In Simple Mode we assume the user always wants to keep the (limited) current settings
details = catalog.i18nc("@label", "You made changes to the following setting(s):")
user_settings = self._active_container_stack.getTop().findInstances(**{})
for setting in user_settings:
details = details + "\n " + setting.definition.label
Application.getInstance().messageBox(catalog.i18nc("@window:title", "Switched profiles"), catalog.i18nc("@label", "Do you want to transfer your changed settings to this profile?"),
catalog.i18nc("@label", "If you transfer your settings they will override settings in the profile."), details,
buttons = QMessageBox.Yes + QMessageBox.No, icon = QMessageBox.Question, callback = self._keepUserSettingsDialogCallback)
self.activeQualityChanged.emit()
def _keepUserSettingsDialogCallback(self, button):
if button == QMessageBox.Yes:
# Yes, keep the settings in the user profile with this profile
pass
elif button == QMessageBox.No:
# No, discard the settings in the user profile
global_stack = Application.getInstance().getGlobalContainerStack()
for extruder in ExtruderManager.getInstance().getMachineExtruders(global_stack.getId()):
extruder.getTop().clear()
global_stack.getTop().clear()
@pyqtProperty(str, notify = activeVariantChanged)
def activeVariantName(self):
if self._active_container_stack:
variant = self._active_container_stack.findContainer({"type": "variant"})
if variant:
return variant.getName()
return ""
@pyqtProperty(str, notify = activeVariantChanged)
def activeVariantId(self):
if self._active_container_stack:
variant = self._active_container_stack.findContainer({"type": "variant"})
if variant:
return variant.getId()
return ""
@pyqtProperty(str, notify = globalContainerChanged)
def activeDefinitionId(self):
if self._global_container_stack:
definition = self._global_container_stack.getBottom()
if definition:
return definition.id
return ""
## Get the Definition ID to use to select quality profiles for the currently active machine
# \returns DefinitionID (string) if found, empty string otherwise
# \sa getQualityDefinitionId
@pyqtProperty(str, notify = globalContainerChanged)
def activeQualityDefinitionId(self):
if self._global_container_stack:
return self.getQualityDefinitionId(self._global_container_stack.getBottom())
return ""
## Get the Definition ID to use to select quality profiles for machines of the specified definition
# This is normally the id of the definition itself, but machines can specify a different definition to inherit qualities from
# \param definition (DefinitionContainer) machine definition
# \returns DefinitionID (string) if found, empty string otherwise
def getQualityDefinitionId(self, definition):
definition_id = definition.getMetaDataEntry("quality_definition")
if not definition_id:
definition_id = definition.getId()
return definition_id
## Get the Variant ID to use to select quality profiles for the currently active variant
# \returns VariantID (string) if found, empty string otherwise
# \sa getQualityVariantId
@pyqtProperty(str, notify = activeVariantChanged)
def activeQualityVariantId(self):
if self._global_container_stack:
variant = self._global_container_stack.findContainer({"type": "variant"})
if variant:
return self.getQualityVariantId(self._global_container_stack.getBottom(), variant)
return ""
## Get the Variant ID to use to select quality profiles for variants of the specified definitions
# This is normally the id of the variant itself, but machines can specify a different definition
# to inherit qualities from, which has consequences for the variant to use as well
# \param definition (DefinitionContainer) machine definition
# \param variant (DefinitionContainer) variant definition
# \returns VariantID (string) if found, empty string otherwise
def getQualityVariantId(self, definition, variant):
variant_id = variant.getId()
definition_id = definition.getId()
quality_definition_id = self.getQualityDefinitionId(definition)
if definition_id != quality_definition_id:
variant_id = variant_id.replace(definition_id, quality_definition_id, 1)
return variant_id
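    ## Hedged illustration with made-up ids: if a machine definition
    ## "examplemaker_printer" declares quality_definition = "fdmprinter" in its
    ## metadata, getQualityDefinitionId() returns "fdmprinter", and a variant id
    ## such as "examplemaker_printer_0.4mm" is rewritten by getQualityVariantId()
    ## to "fdmprinter_0.4mm", so the generic quality profiles can still be matched.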
## Gets how the active definition calls variants
# Caveat: per-definition-variant-title is currently not translated (though the fallback is)
@pyqtProperty(str, notify = globalContainerChanged)
def activeDefinitionVariantsName(self):
fallback_title = catalog.i18nc("@label", "Nozzle")
if self._global_container_stack:
return self._global_container_stack.getBottom().getMetaDataEntry("variants_name", fallback_title)
return fallback_title
@pyqtSlot(str, str)
def renameMachine(self, machine_id, new_name):
containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id = machine_id)
if containers:
new_name = self._createUniqueName("machine", containers[0].getName(), new_name, containers[0].getBottom().getName())
containers[0].setName(new_name)
self.globalContainerChanged.emit()
@pyqtSlot(str)
def removeMachine(self, machine_id):
# If the machine that is being removed is the currently active machine, set another machine as the active machine.
activate_new_machine = (self._global_container_stack and self._global_container_stack.getId() == machine_id)
ExtruderManager.getInstance().removeMachineExtruders(machine_id)
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "user", machine = machine_id)
for container in containers:
UM.Settings.ContainerRegistry.getInstance().removeContainer(container.getId())
UM.Settings.ContainerRegistry.getInstance().removeContainer(machine_id)
if activate_new_machine:
stacks = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(type = "machine")
if stacks:
Application.getInstance().setGlobalContainerStack(stacks[0])
@pyqtProperty(bool, notify = globalContainerChanged)
def hasMaterials(self):
if self._global_container_stack:
return bool(self._global_container_stack.getMetaDataEntry("has_materials", False))
return False
@pyqtProperty(bool, notify = globalContainerChanged)
def hasVariants(self):
if self._global_container_stack:
return bool(self._global_container_stack.getMetaDataEntry("has_variants", False))
return False
## Property to indicate if a machine has "specialized" material profiles.
# Some machines have their own material profiles that "override" the default catch all profiles.
@pyqtProperty(bool, notify = globalContainerChanged)
def filterMaterialsByMachine(self):
if self._global_container_stack:
return bool(self._global_container_stack.getMetaDataEntry("has_machine_materials", False))
return False
## Property to indicate if a machine has "specialized" quality profiles.
# Some machines have their own quality profiles that "override" the default catch all profiles.
@pyqtProperty(bool, notify = globalContainerChanged)
def filterQualityByMachine(self):
if self._global_container_stack:
return bool(self._global_container_stack.getMetaDataEntry("has_machine_quality", False))
return False
## Get the Definition ID of a machine (specified by ID)
# \param machine_id string machine id to get the definition ID of
# \returns DefinitionID (string) if found, None otherwise
@pyqtSlot(str, result = str)
def getDefinitionByMachineId(self, machine_id):
containers = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id=machine_id)
if containers:
return containers[0].getBottom().getId()
@staticmethod
def createMachineManager(engine=None, script_engine=None):
return MachineManager()
def _updateVariantContainer(self, definition):
if not definition.getMetaDataEntry("has_variants"):
return self._empty_variant_container
containers = []
preferred_variant = definition.getMetaDataEntry("preferred_variant")
if preferred_variant:
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "variant", definition = definition.id, id = preferred_variant)
if not containers:
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(type = "variant", definition = definition.id)
if containers:
return containers[0]
return self._empty_variant_container
def _updateMaterialContainer(self, definition, variant_container = None, preferred_material_name = None):
if not definition.getMetaDataEntry("has_materials"):
return self._empty_material_container
search_criteria = { "type": "material" }
if definition.getMetaDataEntry("has_machine_materials"):
search_criteria["definition"] = self.getQualityDefinitionId(definition)
if definition.getMetaDataEntry("has_variants") and variant_container:
search_criteria["variant"] = self.getQualityVariantId(definition, variant_container)
else:
search_criteria["definition"] = "fdmprinter"
if preferred_material_name:
search_criteria["name"] = preferred_material_name
else:
preferred_material = definition.getMetaDataEntry("preferred_material")
if preferred_material:
search_criteria["id"] = preferred_material
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
if containers:
return containers[0]
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
if "variant" in search_criteria or "id" in search_criteria:
# If a material by this name can not be found, try a wider set of search criteria
search_criteria.pop("variant", None)
search_criteria.pop("id", None)
containers = UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(**search_criteria)
if containers:
return containers[0]
Logger.log("w", "Unable to find a material container with provided criteria, returning an empty one instead.")
return self._empty_material_container
def _updateQualityContainer(self, definition, variant_container, material_container = None, preferred_quality_name = None):
container_registry = UM.Settings.ContainerRegistry.getInstance()
search_criteria = { "type": "quality" }
if definition.getMetaDataEntry("has_machine_quality"):
search_criteria["definition"] = self.getQualityDefinitionId(definition)
if definition.getMetaDataEntry("has_materials") and material_container:
search_criteria["material"] = material_container.id
else:
search_criteria["definition"] = "fdmprinter"
if preferred_quality_name and preferred_quality_name != "empty":
search_criteria["name"] = preferred_quality_name
else:
preferred_quality = definition.getMetaDataEntry("preferred_quality")
if preferred_quality:
search_criteria["id"] = preferred_quality
containers = container_registry.findInstanceContainers(**search_criteria)
if containers:
return containers[0]
if "material" in search_criteria:
# First check if we can solve our material not found problem by checking if we can find quality containers
# that are assigned to the parents of this material profile.
try:
inherited_files = material_container.getInheritedFiles()
except AttributeError: # Material_container does not support inheritance.
inherited_files = []
if inherited_files:
for inherited_file in inherited_files:
# Extract the ID from the path we used to load the file.
search_criteria["material"] = os.path.basename(inherited_file).split(".")[0]
containers = container_registry.findInstanceContainers(**search_criteria)
if containers:
return containers[0]
# We still weren't able to find a quality for this specific material.
# Try to find qualities for a generic version of the material.
material_search_criteria = { "type": "material", "material": material_container.getMetaDataEntry("material"), "color_name": "Generic"}
if definition.getMetaDataEntry("has_machine_quality"):
if material_container:
material_search_criteria["definition"] = material_container.getDefinition().id
if definition.getMetaDataEntry("has_variants"):
material_search_criteria["variant"] = material_container.getMetaDataEntry("variant")
else:
material_search_criteria["definition"] = self.getQualityDefinitionId(definition)
if definition.getMetaDataEntry("has_variants") and variant_container:
material_search_criteria["variant"] = self.getQualityVariantId(definition, variant_container)
else:
material_search_criteria["definition"] = "fdmprinter"
material_containers = container_registry.findInstanceContainers(**material_search_criteria)
if material_containers:
search_criteria["material"] = material_containers[0].getId()
containers = container_registry.findInstanceContainers(**search_criteria)
if containers:
return containers[0]
if "name" in search_criteria or "id" in search_criteria:
# If a quality by this name can not be found, try a wider set of search criteria
search_criteria.pop("name", None)
search_criteria.pop("id", None)
containers = container_registry.findInstanceContainers(**search_criteria)
if containers:
return containers[0]
# Notify user that we were unable to find a matching quality
message = Message(catalog.i18nc("@info:status", "Unable to find a quality profile for this combination. Default settings will be used instead."))
message.show()
return self._empty_quality_container
## Finds a quality-changes container to use if any other container
# changes.
#
# \param quality_type The quality type to find a quality-changes for.
# \param preferred_quality_changes_name The name of the quality-changes to
# pick, if any such quality-changes profile is available.
def _updateQualityChangesContainer(self, quality_type, preferred_quality_changes_name = None):
container_registry = UM.Settings.ContainerRegistry.getInstance() # Cache.
search_criteria = { "type": "quality_changes" }
search_criteria["quality"] = quality_type
if preferred_quality_changes_name:
search_criteria["name"] = preferred_quality_changes_name
# Try to search with the name in the criteria first, since we prefer to have the correct name.
containers = container_registry.findInstanceContainers(**search_criteria)
if containers: # Found one!
return containers[0]
if "name" in search_criteria:
del search_criteria["name"] # Not found, then drop the name requirement (if we had one) and search again.
containers = container_registry.findInstanceContainers(**search_criteria)
if containers:
return containers[0]
return self._empty_quality_changes_container # Didn't find anything with the required quality_type.
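    ## Hedged illustration for the method above (names are made up): with
    ## quality_type "normal" and preferred_quality_changes_name "My tweaks", the
    ## registry is first searched for a quality_changes container named "My tweaks"
    ## for that quality type; if none exists the name requirement is dropped and the
    ## search retried, and failing that the empty quality_changes container is returned.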
def _onMachineNameChanged(self):
self.globalContainerChanged.emit()
def _onMaterialNameChanged(self):
self.activeMaterialChanged.emit()
def _onQualityNameChanged(self):
self.activeQualityChanged.emit()
|
agpl-3.0
| -7,270,229,336,275,078,000
| 49.012948
| 192
| 0.669548
| false
| 4.503408
| false
| false
| false
|
Foggalong/scraps
|
files/cc/game.py
|
1
|
2715
|
#!/usr/bin/python3
import time
from random import randint
# Game Vars
# Health
hvar = 100
svar = 100
# Fight
# stats not final
# player
# weapons
weaponls = ['bare hands', 'sword', 'axe', 'staff']
wpninpackls = [1, 1, 1, 0]
wpnhealthls = [100, 20, 30, 50]
wpndamagels = [5, 7, 10, 20]
wpnchancels = [8, 7, 5, 6]
# monsters
monsterls = ['goblin', 'troll', 'warlock']
monhealthls = [10, 20, 50]
mondamagels = [5, 10, 15]
monchancels = [2, 5, 8] # value out of ten
#/Fight
class funct:
def info(self, item):
if item in monsterls:
print("Name:", item)
print("Type: monster")
print("Health:", monhealthls[monsterls.index(item)])
print("Damage:", mondamagels[monsterls.index(item)])
print("Chance:", monchancels[monsterls.index(item)])
elif item in weaponls:
print("Name:", item)
print("Type: weapon")
print("Health:", wpnhealthls[weaponls.index(item)])
print("Damage:", wpndamagels[weaponls.index(item)])
print("Chance:", wpnchancels[weaponls.index(item)])
else:
print("No information could be found.")
def fight(self, monster):
global hvar
ind = monsterls.index(monster)
monhealth, mondamage, monchance = monhealthls[ind], mondamagels[ind], monchancels[ind]
run = 1
while run == 1:
action = input("\n> ")
# if 'attack' in action:
# any(word in str1 for word in weapon)
if action == 'fight':
roll = randint(0, 10)
if roll > monchance:
monhealth -= 7
print("You landed a blow!")
elif roll == monchance:
print("You and the", monster, "clashed!")
elif roll < monchance:
print("The", monster, "landed a blow!")
hvar -= mondamage
if monhealth < 1 or hvar < 1:
if monhealth < 1:
print("You killed the "+monster+"!\n")
elif hvar < 1:
print("The "+monster+" killed you!\n")
break
elif action == 'battle info':
print("Your health:", hvar)
print(monster+"'s health:", monhealth, "\n")
elif action.split()[0] == 'info':
try:
funct.info(self, action.split()[1])
except:
print("Information about what?")
monster = monsterls[randint(0, len(monsterls)-1)]
action = input("A wild "+monster+" appears!\n> ")
if action == 'fight':
funct.fight(funct, monster)
elif action == 'run':
print("You ran away from the "+monster+"! WUSS!")
print("\nDebug died!")
print("Program fin.\nIt will close in\n1 mintue.")
time.sleep(60)
# A nice example of classes
"""
Paul 12
>>> class tut:
... def name(self, name):
... print(name, age)
... age = 12
...
>>> tut.name("Paul")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: name() takes exactly 2 arguments (1 given)
>>> tut.name(tut, "Paul")
Paul 12
>>>
"""
|
gpl-2.0
| -3,856,779,248,668,732,000
| 22.617391
| 88
| 0.624309
| false
| 2.776074
| false
| false
| false
|
lavish/drs
|
robot/ev3dev_utils.py
|
1
|
3416
|
import time, ev3dev
def run_for(motor, power=75, ever=None, seconds=None, degrees=None):
""" Run motor for specified amount of seconds, degrees, or forever
Examples:
run_for(motor, ever=True)
run_for(motor, seconds=0.5)
run_for(motor, degrees=270, power=100)
    Power is specified as a percentage in the range [-100; 100]. When the
    motor is in regulation mode, the power value is used to compute the
    pulses_per_second setpoint. The upper limits for pulses_per_second are
    assumed to be 900 and 1200 for tacho and minitacho motors respectively.
"""
#motor.regulation_mode = ev3dev.motor.mode_on
if motor.regulation_mode == ev3dev.motor.mode_on:
motor.pulses_per_second_setpoint = int(power)
else:
motor.duty_cycle_setpoint = int(power)
if ever is not None:
motor.run_mode = ev3dev.motor.run_mode_forever
elif seconds is not None:
motor.run_mode = ev3dev.motor.run_mode_time
motor.time_setpoint = int(seconds * 1000)
elif degrees is not None:
motor.run_mode = ev3dev.motor.run_mode_position
motor.position_mode = ev3dev.motor.position_mode_relative
motor.position = 0
motor.position_setpoint = int(degrees)
motor.run()
def run_until(motor, power=75, degrees=None, check=None):
""" Run motor until specified position or until check() evaluates to True.
Examples:
run_until(motor, degrees=270, power=40)
run_until(motor, check=lambda: touch_sensor.value())
    Power is specified as a percentage in the range [-100; 100]. When the
    motor is in regulation mode, the power value is used to compute the
    pulses_per_second setpoint. The upper limits for pulses_per_second are
    assumed to be 900 and 1200 for tacho and minitacho motors respectively.
"""
if motor.regulation_mode == ev3dev.motor.mode_on:
if motor.type() == 'tacho':
motor.pulses_per_second_setpoint = int(power * 9)
elif motor.type() == 'minitacho':
motor.pulses_per_second_setpoint = int(power * 12)
else:
motor.duty_cycle_setpoint = int(power)
if degrees is not None:
motor.run_mode = ev3dev.motor.run_mode_position
motor.position_mode = ev3dev.motor.position_mode_absolute
motor.position_setpoint = int(degrees)
else:
motor.run_mode = ev3dev.motor.run_mode_forever
motor.run()
while True:
if degrees is not None:
if not motor.running(): break
elif check():
motor.stop()
break
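# Worked example of the regulation-mode scaling used in run_until() above (pure
# arithmetic, no hardware needed): a tacho motor driven at power=75 gets
# pulses_per_second_setpoint = 75 * 9 = 675 (of the assumed 900 maximum), while a
# minitacho motor at the same power gets 75 * 12 = 900 (of the assumed 1200 maximum).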
def drive_for(left_motor, right_motor, direction=0, power=75, ever=None, seconds=None):
""" Run both motors for a specified amount of seconds, or forever. The
direction parameter is in range [-100, 100] and specifies how fast the
robot should turn.
direction = -100: turn left as fast as possible,
direction = 0: drive forward,
direction = 100: turn right as fast as possible.
The motor on the outer arc is driven at full power (specified as 'power'
parameter), and the inner motor power is computed accordingly.
"""
if (direction >= 0):
master = left_motor
slave = right_motor
else:
master = right_motor
slave = left_motor
mpower = power
spower = power * (50 - abs(direction)) / 50
run_for(master, mpower, ever, seconds)
run_for(slave, spower, ever, seconds)
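if __name__ == '__main__':
    # Illustrative only: how drive_for() above splits power between the outer
    # (master) and inner (slave) motor for a few direction values. This is the
    # same arithmetic as in drive_for(), so it runs without EV3 hardware; a
    # negative inner value means the inner wheel runs in reverse.
    for direction in (-100, 0, 50, 100):
        power = 75
        inner = power * (50 - abs(direction)) / 50
        print('direction=%4d -> outer=%d%%, inner=%d%%' % (direction, power, inner))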
|
mit
| -6,067,089,681,917,996,000
| 34.583333
| 87
| 0.657201
| false
| 3.592008
| false
| false
| false
|
ZeroQI/Hama.bundle
|
Contents/Code/common.py
|
1
|
55275
|
### common ###
# https://www.python.org/dev/peps/pep-0008/
# Usage: "common.GetPosters" = "from common import GetPosters"
### Imports ### ### Functions used ###
# Python Modules #
import os # path.abspath, join, dirname
import time # datetime.datetime.now()
import re # sub
import logging #
import datetime # datetime.now
import ssl, urllib2 # urlopen
import unicodedata #
import StringIO, gzip #
from string import maketrans # maketrans
import threading #local,
tlocal = threading.local()
#Log.Info('tlocal: {}'.format(dir(tlocal)))
### Variables ###
PlexRoot = Core.app_support_path
#if not os.path.isdir(PlexRoot):
# path_location = { 'Windows': '%LOCALAPPDATA%\\Plex Media Server',
# 'MacOSX': '$HOME/Library/Application Support/Plex Media Server',
# 'Linux': '$PLEX_HOME/Library/Application Support/Plex Media Server' }
# PlexRoot = os.path.expandvars(path_location[Platform.OS.lower()] if Platform.OS.lower() in path_location else '~') # Platform.OS: Windows, MacOSX, or Linux
CachePath = os.path.join(PlexRoot, "Plug-in Support", "Data", "com.plexapp.agents.hama", "DataItems")
downloaded = {'posters':0, 'art':0, 'seasons':0, 'banners':0, 'themes':0, 'thumbs': 0}
netLock = Thread.Lock()
netLocked = {}
WEB_LINK = "<a href='%s' target='_blank'>%s</a>"
TVDB_SERIE_URL = 'https://thetvdb.com/?tab=series&id=' # Used in error_log generation
ANIDB_SERIE_URL = 'https://anidb.net/anime/' # Used in error_log generation
DefaultPrefs = ("SerieLanguagePriority", "EpisodeLanguagePriority", "PosterLanguagePriority", "AnidbGenresMainOnly", "MinimumWeight", "adult", "OMDbApiKey") #"Simkl",
FieldListMovies = ('original_title', 'title', 'title_sort', 'roles', 'studio', 'year', 'originally_available_at', 'tagline', 'summary', 'content_rating', 'content_rating_age',
'producers', 'directors', 'writers', 'countries', 'posters', 'art', 'themes', 'rating', 'quotes', 'trivia')
FieldListSeries = ('title', 'title_sort', 'originally_available_at', 'duration','rating', 'reviews', 'collections', 'genres', 'tags' , 'summary', 'extras', 'countries', 'rating_count',
'content_rating', 'studio', 'countries', 'posters', 'banners', 'art', 'themes', 'roles', 'original_title',
'rating_image', 'audience_rating', 'audience_rating_image') # Not in Framework guide 2.1.1, in https://github.com/plexinc-agents/TheMovieDb.bundle/blob/master/Contents/Code/__init__.py
FieldListSeasons = ('summary','posters', 'art') #'summary',
FieldListEpisodes = ('title', 'summary', 'originally_available_at', 'writers', 'directors', 'producers', 'guest_stars', 'rating', 'thumbs', 'duration', 'content_rating', 'content_rating_age', 'absolute_index') #'titleSort
SourceList = ('AniDB', 'MyAnimeList', 'FanartTV', 'OMDb', 'TheTVDB', 'TheMovieDb', 'Plex', 'AnimeLists', 'tvdb4', 'TVTunes', 'Local', 'AniList') #"Simkl",
Movie_to_Serie_US_rating = {"G" : "TV-Y7", "PG" : "TV-G", "PG-13": "TV-PG", "R" : "TV-14", "R+" : "TV-MA", "Rx" : "NC-17"}
COMMON_HEADERS = {'User-agent': 'Plex/HAMA', 'Content-type': 'application/json'}
THROTTLE = {}
### Plex Library XML ###
PLEX_LIBRARY, PLEX_LIBRARY_URL = {}, "http://localhost:32400/library/sections/" # Allow to get the library name to get a log per library https://support.plex.tv/hc/en-us/articles/204059436-Finding-your-account-token-X-Plex-Token
def GetPlexLibraries():
try:
library_xml = XML.ElementFromURL(PLEX_LIBRARY_URL, cacheTime=0, timeout=float(30), headers={"X-Plex-Token": os.environ['PLEXTOKEN']})
PLEX_LIBRARY.clear()
Log.Root('Libraries: ')
for directory in library_xml.iterchildren('Directory'):
for location in directory:
if directory.get("agent") == "com.plexapp.agents.hama": PLEX_LIBRARY[location.get("path")] = directory.get("title") # Only pull libraries that use HAMA to prevent miss identification
Log.Root('[{}] id: {:>2}, type: {:<6}, agent: {:<30}, scanner: {:<30}, library: {:<24}, path: {}'.format('x' if directory.get("agent") == "com.plexapp.agents.hama" else ' ', directory.get("key"), directory.get('type'), directory.get("agent"), directory.get("scanner"), directory.get('title'), location.get("path")))
except Exception as e: Log.Root("PLEX_LIBRARY_URL - Exception: '{}'".format(e))
### Get media directory ###
def GetMediaDir(media, movie, file=False):
if movie: return media.items[0].parts[0].file if file else os.path.dirname(media.items[0].parts[0].file)
else:
for s in media.seasons if media else []: # TV_Show:
for e in media.seasons[s].episodes:
return media.seasons[s].episodes[e].items[0].parts[0].file if file else os.path.dirname(media.seasons[s].episodes[e].items[0].parts[0].file)
### Get media root folder ###
def GetLibraryRootPath(dir, repull_libraries=True):
roots_found, library, root, path = [], '', '', ''
for root in [os.sep.join(dir.split(os.sep)[0:x+2]) for x in range(0, dir.count(os.sep))]:
if root in PLEX_LIBRARY: roots_found.append(root)
if len(roots_found) > 0:
root = max(roots_found)
library = PLEX_LIBRARY[root]
path = os.path.relpath(dir, root)
else:
if repull_libraries:
      GetPlexLibraries() # Re-pull the library listings, because a library created while HAMA was already running would not be known yet
library, root, path = GetLibraryRootPath(dir, repull_libraries=False) # Try again but don't repull libraries as it will get stuck in an infinite loop
else:
path, root = '_unknown_folder', ''
return library, root, path
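# Illustrative only (hypothetical Linux-style paths): if PLEX_LIBRARY contains {'/mnt/anime': 'Anime'},
# GetLibraryRootPath('/mnt/anime/Series X/Season 1') returns ('Anime', '/mnt/anime', 'Series X/Season 1');
# a folder outside every known library falls back to ('', '', '_unknown_folder') after one re-pull attempt.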
class PlexLog(object):
''' Logging class to join scanner and agent logging per serie
      Usage Scanner: (not currently used in the scanner, as it is independent from Hama)
- from "../../Plug-ins/Hama.bundle/Contents/code/common" import PlexLog
- log = PlexLog(file='root/folder/[anidb2-xxxx].log', isAgent=False)
Usage Agent:
- log = common.PlexLog(file='mytest.log', isAgent=True )
- log.debug('some debug message: %s', 'test123')
'''
def Logger (self):
logger = logging.getLogger(hex(threading.currentThread().ident))
return logger if logger.handlers else logging.getLogger('com.plexapp.agents.hama')
def Root (self, msg, *args, **kwargs): logging.getLogger('com.plexapp.agents.hama').debug(msg, *args, **kwargs)
def Debug (self, msg, *args, **kwargs): self.Logger().debug (msg, *args, **kwargs)
def Info (self, msg, *args, **kwargs): self.Logger().info (msg, *args, **kwargs)
def Warning (self, msg, *args, **kwargs): self.Logger().warning (msg, *args, **kwargs)
def Error (self, msg, *args, **kwargs): self.Logger().error ("ERROR: {}".format(msg), *args, **kwargs); self.Root("ERROR: {}".format(msg))
def Critical (self, msg, *args, **kwargs): self.Logger().critical("FATAL: {}".format(msg), *args, **kwargs); self.Root("FATAL: {}".format(msg))
def Open (self, media=None, movie=False, search=False, isAgent=True, log_format='%(message)s', file="", mode='w', maxBytes=4*1024*1024, backupCount=5, encoding=None, delay=False, enable_debug=True):
if not file:
library, root, path = GetLibraryRootPath(GetMediaDir(media, movie))#Get movie or serie episode folder location
mode = 'a' if path in ('_unknown_folder', '_root_') else 'w'
#Logs folder
      for char in list("\\/:*?<>|~;"):                           # characters that are not allowed in folder names
        if char in library: library = library.replace(char, '-') # replace them so the library name can be used as a log folder name
      LOGS_PATH = os.path.join(CachePath, '_Logs', library)
      if not os.path.exists(LOGS_PATH): os.makedirs(LOGS_PATH); self.Debug("[!] folder: '{}' created".format(LOGS_PATH))
if path=='' and root: path='_root_'
filename = path.split(os.sep, 1)[0]+'.agent-search.log' if search else path.split(os.sep, 1)[0]+'.agent-update.log'
file = os.path.join(LOGS_PATH, filename)
try:
log = logging.getLogger(hex(threading.currentThread().ident)) # update thread's logging handler
for handler in log.handlers: log.removeHandler(handler) # remove all old handlers
handler_new = logging.FileHandler(file, mode=mode or 'w', encoding=encoding, delay=delay)
handler_new.setFormatter(logging.Formatter(log_format)) # Set log format
log.addHandler(handler_new)
log.setLevel(logging.DEBUG if enable_debug else logging.INFO) # update level
log = logging.getLogger('com.plexapp.agents.hama') # update hama root's logging handler
library_log = os.path.join(LOGS_PATH, '_root_.agent.log')
if library_log not in [handler.baseFilename for handler in log.handlers if hasattr(handler, 'baseFilename')]:
for handler in log.handlers:
if hasattr(handler, 'baseFilename') and os.path.join(CachePath, '_Logs') in handler.baseFilename: log.removeHandler(handler)
handler_new = logging.handlers.RotatingFileHandler(library_log, mode='a', maxBytes=4*1024*1024, backupCount=1, encoding=encoding, delay=delay)
#handler_new = logging.FileHandler(library_log, mode='w', encoding=encoding, delay=delay)
handler_new.setFormatter(logging.Formatter('%(asctime)-15s - %(thread)x - %(message)s')) # Set log format
log.addHandler(handler_new)
log.info('==== common.PlexLog(file="{}")'.format(file))
except IOError as e: self.isAgent = isAgent; logging.getLogger('com.plexapp.agents.hama').info('updateLoggingConfig: failed to set logfile: {}'.format(e))
self.Info("".ljust(157, '='))
self.Info('common.PlexLog(file="{}", movie={})'.format(file, movie))
self.Info('[!] file: "{}"'.format(GetMediaDir(media, movie, True)))
self.Info('[ ] library: "{}"'.format(library))
self.Info('[ ] root: "{}"'.format(root))
self.Info('[ ] path: "{}"'.format(path))
self.Info('[ ] Plex root: "{}"'.format(PlexRoot))
self.Info('[ ] Log folder: "{}"'.format(os.path.relpath(LOGS_PATH, PlexRoot)))
self.Info('[ ] Log file: "{}"'.format(filename))
self.Info('[ ] Logger: "{}"'.format(hex(threading.currentThread().ident)))
self.Info('[ ] mode: "{}"'.format(mode))
self.isAgent = isAgent
def Close (self):
log = logging.getLogger(hex(threading.currentThread().ident)) # update root logging's handler
for handler in log.handlers: log.removeHandler(handler)
Log = PlexLog()
### Code reduction one-liners that get imported specifically ###
#def GetMeta (source="", field="" ): return (downloaded[field]<=1) and (not source or source in Prefs['posters' if field=='seasons' else field]) and not Prefs['posters' if field=='seasons' else field]=="None"
def GetXml (xml, field ): return xml.xpath(field)[0].text if xml.xpath(field) and xml.xpath(field)[0].text not in (None, '', 'N/A', 'null') else '' #allow isdigit() checks
def urlFilename (url ): return "/".join(url.split('/')[3:])
def urlDomain (url ): return "/".join(url.split('/')[:3])
def natural_sort_key(s ): return [int(text) if text.isdigit() else text for text in re.split(r'([0-9]+)', str(s).lower())] # list.sort(key=natural_sort_key) #sorted(list, key=natural_sort_key) - Turn a string into string list of chunks "z23a" -> ["z", 23, "a"]
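# Illustrative: sorted(['ep12', 'ep2', 'ep1'], key=natural_sort_key) -> ['ep1', 'ep2', 'ep12'],
# because the numeric chunks are compared as integers instead of character by character.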
def replaceList (string, a, b, *args):
  for x, y in zip(a, b): string = string.replace(x, y, *args)  # str.replace returns a new string, so the result must be reassigned
return string
def LevenshteinRatio(first, second):
return 100 - int(100 * LevenshteinDistance(first, second) / float(max(len(first), len(second)))) if len(first)*len(second) else 0
def LevenshteinDistance(first, second):
""" Compute Levenshtein distance
"""
if len(first) > len(second): first, second = second, first
if len(second) == 0: return len(first)
first_length = len(first ) + 1
second_length = len(second) + 1
distance_matrix = [[0] * second_length for x in range(first_length)]
for i in range(first_length): distance_matrix[i][0] = i
for j in range(second_length): distance_matrix[0][j] = j
for i in xrange(1, first_length):
for j in range(1, second_length):
distance_matrix[i][j] = min(distance_matrix[i][j-1]+1, distance_matrix[i-1][j]+1, distance_matrix[i-1][j-1] + (1 if first[i-1] != second[j-1] else 0))
return distance_matrix[first_length-1][second_length-1]
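# Illustrative: LevenshteinDistance('kitten', 'sitting') == 3 (two substitutions and one insertion),
# so LevenshteinRatio('kitten', 'sitting') == 100 - int(100 * 3 / 7.0) == 58.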
def IsIndex(var, index): #Avoid TypeError: argument of type 'NoneType' is not iterable
""" Return the length of the array or index no errors
"""
try: return var[index]
except: return ''
def Dict(var, *arg, **kwarg):
""" Return the value of an (imbricated) dictionnary, if all fields exist else return "" unless "default=new_value" specified as end argument
Avoid TypeError: argument of type 'NoneType' is not iterable
Ex: Dict(variable_dict, 'field1', 'field2', default = 0)
"""
for key in arg:
if isinstance(var, dict) and key and key in var: var = var[key]
else: return kwarg['default'] if kwarg and 'default' in kwarg else "" # Allow Dict(var, tvdbid).isdigit() for example
return kwarg['default'] if var in (None, '', 'N/A', 'null') and kwarg and 'default' in kwarg else "" if var in (None, '', 'N/A', 'null') else var
def SaveDict(value, var, *arg):
""" Save non empty value to a (nested) Dictionary fields unless value is a list or dict for which it will extend it instead
# ex: SaveDict(GetXml(ep, 'Rating'), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'rating')
# ex: SaveDict(Dict(TheTVDB_dict, 'title'), TheTVDB_dict, 'title_sort')
# ex: SaveDict(genre1, TheTVDB_dict, genre) to add to current list
# ex: SaveDict([genre1, genre2], TheTVDB_dict, genre) to extend to current list
"""
if not value and value!=0: return "" # update dict only as string would revert to pre call value being immutable
if not arg and (isinstance(var, list) or isinstance(var, dict)):
if not (isinstance(var, list) or isinstance(var, dict)): var = value
elif isinstance(value, list) or isinstance(value, dict): var.extend (value)
else: var.append (value)
return value
for key in arg[:-1]:
if not isinstance(var, dict): return ""
if not key in var: var[key] = {}
var = var[key]
if not arg[-1] in var or not isinstance(var[arg[-1]], list): var[arg[-1]] = value
elif isinstance(value, list) or isinstance(value, dict): var[arg[-1]].extend (value)
else: var[arg[-1]].append (value)
return value
### import var 2 dict into var and returns it
def UpdateDict(var, var2): var.update(var2); return var
def DictString(input_value, max_depth, initial_indent=0, depth=0):
""" Expand a dict down to 'max_depth' and sort the keys.
To print it on a single line with this function use (max_depth=0).
EX: (max_depth=1)
mappingList: {
'season_map': {'13493': {'max': '3', 'min': '3'}}}
EX: (max_depth=2)
mappingList: {
'season_map': {
'9306': {'max': '2', 'min': '1'},
'11665': {'max': '3', 'min': '3'}}}
"""
output = ""
indent = "\n" + " " * initial_indent + " " * (depth+1)
if depth >= max_depth or not isinstance(input_value, dict):
if isinstance(input_value, list) and depth<max_depth: output += "[" + indent + indent.join([("'{}'," if isinstance(x, str) else "{},").format(x) for x in input_value])[:-1] + "]"
elif isinstance(input_value, dict):
for i, key in enumerate(sorted(input_value, key=natural_sort_key)):
output += (
"{}: ".format("'{}'".format(key.replace("'", "\\'")) if isinstance(key, basestring) else key) +
"{}".format("'{}'".format(input_value[key].replace("'", "\\'").replace("\n", "\\n").replace("\r", "\\r")) if isinstance(input_value[key], basestring) else input_value[key]) +
(", " if i!=len(input_value)-1 else "")) # remove last ','
output = "{" + output + "}"
else: output += "{}".format(input_value)
else:
for i, key in enumerate(sorted(input_value, key=natural_sort_key)):
value = input_value[key] if isinstance(input_value[key], basestring) else DictString(input_value[key], max_depth, initial_indent, depth+1)
output += (
indent +
"{}: ".format("'{}'".format(key.replace("'", "\\'")) if isinstance(key, basestring) else key) +
"{}".format("'{}'".format(value.replace("'", "\\'").replace("\n", "\\n").replace("\r", "\\r")) if isinstance(input_value[key], basestring) else value) +
("," if i!=len(input_value)-1 else "")) # remove last ','
output = "{" + output + "}"
return output
# Other options passed on as can't define expansion depth
#import pprint; pprint.pprint(input_value)
#import json; return json.dumps(input_value, indent=2, sort_keys=True)
def ssl_open(url, headers={}, timeout=20):
''' SSLV3_ALERT_HANDSHAKE_FAILURE
1. Do not verify certificates. A bit like how older Python versions worked
Import ssl and urllib2
Use urllib2 with a default ssl context (which does not verify the certificate).
Or:
2. Set PlexPluginCodePolicy to Elevated in Info.plist
Add external Python libraries to your project bundle
Import certifi and requests into your Python code
Use requests
'''
headers = UpdateDict(headers, COMMON_HEADERS)
return urllib2.urlopen(urllib2.Request(url, headers=headers), context=ssl.SSLContext(ssl.PROTOCOL_SSLv23), timeout=timeout).read()
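# Hedged usage sketch (hypothetical URL): raw = ssl_open('https://example.com/data.json', headers={'Accept': 'application/json'})
# returns the response body as a string; certificate verification is intentionally skipped, as explained above.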
def GetStatusCode(url):
""" This function retreives the status code of a website by requesting HEAD data only from the host.
This means that it only requests the headers. If the host cannot be reached or something else goes wrong, it returns None instead.
urllib.parse.quote(string, safe='/', encoding=None, errors=None)
- string: string your trying to encode
- safe: string contain characters to ignore. Defualt is '/'
- encoding: type of encoding url is in. Default is utf-8
- errors: specifies how errors are handled. Default is 'strict' which throws a UnicodeEncodeError, I think.
#host = "/".join(url.split('/', 3)[:-1]) #path = url.replace(" ", "%20").split('/', 3)[3] #Log.Info("host: '%s', path: '%s'" % (host, path))
"""
try:
request = urllib2.Request(url) #urllib.quote #urllib2.quote(url,':/')
request.get_method = lambda: 'HEAD'
return urllib2.urlopen(request).getcode() # if "Content-Type: audio/mpeg" in response.info(): Log.Info("Content-Type: audio/mpeg")
except Exception as e: return str(e)
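# Illustrative (hypothetical URL): GetStatusCode('https://example.com/') would normally return an integer
# such as 200, while an unreachable host makes it return the exception text as a string instead.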
def SaveFile(filename="", file="", relativeDirectory=""):
''' Save file to cache, Thanks Dingmatt for folder creation ability
'''
relativeFilename = os.path.join (relativeDirectory, filename)
relativeDirectory, filename = os.path.split(relativeFilename) #if os.sep in filename:
fullpathDirectory = os.path.abspath(os.path.join(CachePath, relativeDirectory))
try:
if not os.path.exists(fullpathDirectory): os.makedirs(fullpathDirectory)
Data.Save(relativeFilename, file)
except Exception as e: Log.Debug("common.SaveFile() - Exception: {exception}, relativeFilename: '{relativeFilename}', file: '{file}'".format(exception=e, relativeFilename=relativeFilename, file=file))
else: Log.Info ("common.SaveFile() - CachePath: '{path}', file: '{file}'".format(path=CachePath, file=relativeFilename))
def decompress(file):
times = 0
try:
while True:
file = gzip.GzipFile(fileobj=StringIO.StringIO(file)).read()
times += 1
except: pass
if times > 0: Log.Root("Decompression times: {}".format(times))
return file
# Return string or object if appropriate
def ObjectFromFile(file=""):
file = decompress(file)
#TEXT file
if isinstance(file, basestring):
#XML
if file.startswith('<?xml '): #if type(file).__name__ == '_Element' or isinstance(file, basestring) and file.startswith('<?xml '):
try: return XML.ElementFromString(file, max_size=1024*1024*10) # Overide max size to 10mb from 5mb default
except Exception as e:
Log.Info("XML corrupted. Exception: {}".format(e))
try: return XML.ElementFromString(file.decode('utf-8','ignore').replace('\b', '').encode("utf-8"))
except Exception as e2: Log.Info("XML still corrupted after normalization. Exception: {}".format(e2)); return
#JSON
elif file.startswith('{'): #Json
try: return JSON.ObjectFromString(file, encoding=None)
except Exception as e: Log.Info("JSON corrupted. Exception: {}".format(e)); return
#Empty file
elif file=="": Log.Info("Empty file"); return
return file
def LoadFileCache(filename="", relativeDirectory=""):
''' Load file in Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems (return file_object, file_age)
'''
relativeFilename = os.path.join(relativeDirectory, filename)
fullpathFilename = os.path.abspath(os.path.join(CachePath, relativeDirectory, filename))
if filename.endswith(".xml.gz"): filename = filename[:-3] #anidb title database
# Load from disk if present
file, file_age, file_object = None, None, None
if Data.Exists(relativeFilename):
try: file = Data.Load(relativeFilename)
except: Log.Debug("common.LoadFileCache() - File cache locally but failed loading - file: {}".format(relativeFilename))
else:
file_object = ObjectFromFile(file)
if file_object:
file_age = time.time() - os.stat(fullpathFilename).st_mtime
else:
Log.Info('common.LoadFileCache() - local file "{}" deleted as failed validity test - file: {}'.format(relativeFilename, file))
Data.Remove(relativeFilename) #DELETE CACHE AS CORRUPTED
return file_object, file_age
def throttle_count(index="", duration=0):
if not index or index not in THROTTLE: return 0
now, removed = time.time(), 0
# Remove entries older than 1 hour
for entry in THROTTLE[index][:]:
if entry < now-duration:
THROTTLE[index].remove(entry)
removed += 1
else: break # First entry found under duration age so all others will also be under as well
if removed: Log.Root("Throttle '{}' count reduced by '{}'".format(index, removed))
return len(THROTTLE[index])
def throttle_add(index=""):
if index:
if index not in THROTTLE: THROTTLE[index] = []
THROTTLE[index].append(time.time())
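# Minimal sketch of the throttle bookkeeping used by LoadFile() below (index name and numbers are illustrative):
#   throttle_add('SomeSource')            # record one call made right now
#   throttle_count('SomeSource', 3600)    # -> number of 'SomeSource' calls recorded during the last hour
# LoadFile() keeps polling throttle_count() and sleeping until the count drops below the allowed maximum.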
def LoadFile(filename="", relativeDirectory="", url="", headers={}, data=None, cache=CACHE_1DAY*6, sleep=0, throttle=["", 0, 0]):
''' Load file in Plex Media Server/Plug-in Support/Data/com.plexapp.agents.hama/DataItems if cache time not passed
'''
headers = UpdateDict(headers, COMMON_HEADERS)
if filename.endswith(".gz"): filename = filename[:-3] # Remove and '.gz' from the local filename as it will be decompressed at pull
# Load from disk if present
file_object, file_age = LoadFileCache(filename, relativeDirectory)
if file_object: Log.Debug("common.LoadFile() - File cached locally - Filename: '{file}', Age: '{age:.2f} days', Limit: '{limit} days', url: '{url}'".format(url=url, file=os.path.join(relativeDirectory, filename), age=file_age/CACHE_1DAY, limit=cache/CACHE_1DAY))
#File not cached OR cache older than passed cache age / adjusted AniDB age
file_downloaded = None
if not file_object or file_age > cache:
# Check to see if we are at throttle max and needs to be put on hold
    # Done before the lock is acquired, to allow other threads to move forward
while throttle[0]: # Only check if throttle index is defined
count = throttle_count(throttle[0], throttle[1])
if count >= throttle[2]:
Log.Root("Throttle max hit {}. Waiting 60 sec for headroom".format(throttle))
time.sleep(60)
else: # Add in this pull into the throttle count and continue on
throttle_add(throttle[0])
Log.Root("Throttle: '{}', Duration: {}, Count: {} of {}".format(throttle[0], throttle[1], count+1, throttle[2]))
break
    # Thread lock acquire
netLock.acquire()
# Safeguard if netLock does not work as expected
while 'LoadFile' in netLocked and netLocked['LoadFile'][0]:
Log.Root("Waiting for lock: 'LoadFile'")
time.sleep(1)
netLocked['LoadFile'] = (True, int(time.time())) #Log.Root("Lock acquired: 'LoadFile'")
# Download URL to memory, Plex cache to 1 day
try:
file_downloaded = HTTP.Request(url, headers=headers, data=data, timeout=60, cacheTime=CACHE_1DAY).content #'Accept-Encoding':'gzip' # Loaded with Plex cache, str prevent AttributeError: 'HTTPRequest' object has no attribute 'find', None if 'thetvdb' in url else
if url.endswith(".gz"): file_downloaded = decompress(file_downloaded)
except Exception as e:
Log.Error("common.LoadFile() - issue loading url: '{}', filename: '{}', Headers: {}, Exception: '{}'".format(url, filename, headers, e)) # issue loading, but not AniDB banned as it returns "<error>Banned</error>"
else:
Log.Root("Downloaded URL '{}'".format(url))
# Sleeping after call completion to prevent ban
time.sleep(sleep)
# Safeguard if netLock does not work as expected
netLocked['LoadFile'] = (False, 0) #Log.Root("Lock released: 'LoadFile'")
# Thread lock release
netLock.release()
    # Downloaded file checks and saving as cache  #if str(file).startswith("<Element error at ") or file in ('<error>Banned</error>', '<error>aid Missing or Invalid</error>'):
if file_downloaded:
file_downloaded_object = ObjectFromFile(file_downloaded)
if not file_downloaded_object: Log.Error('common.LoadFile() - File received but failed validity, file: "{}"'.format(file_downloaded))
elif url.endswith('.xml') and len(file_downloaded)<24: Log.Error('common.LoadFile() - File received too small (<24 bytes), file: "{}"'.format(file_downloaded))
elif file_downloaded.startswith("<error"): Log.Error('common.LoadFile() - Error response received, file: "{}"'.format(file_downloaded)); return file_downloaded_object
else: SaveFile(filename, file_downloaded, relativeDirectory); return file_downloaded_object
return file_object
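# Hedged usage sketch (file name, directory and URL are hypothetical, not a real HAMA endpoint):
#   obj = LoadFile(filename='example.xml', relativeDirectory='SomeSource/12345',
#                  url='https://example.com/api/12345.xml', cache=CACHE_1DAY*6, sleep=6, throttle=['SomeSource', 3600, 100])
# It returns the parsed XML/JSON object from the local cache while the cache is fresh enough,
# otherwise it downloads, validates and caches the file before returning it.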
### Download images and themes for Plex ###############################################################################################################################
def metadata_download(metadata, metatype, url, filename="", num=99, url_thumbnail=None):
if metatype==metadata.posters: string = "posters"
elif metatype==metadata.art: string = "art"
elif metatype==metadata.banners: string = "banners"
elif metatype==metadata.themes: string = "themes"
elif filename.startswith("TVDB/episodes/"): string = "thumbs"
else: string = "seasons"
if url in metatype: Log.Info("url: '%s', num: '%d', filename: '%s'*" % (url, num, filename))
else:
file, status = None, ""
try:
if filename and Data.Exists(filename): status += ", Found locally"; file = Data.Load(filename)
else:
file = (ssl_open((url_thumbnail or url).replace('thetvdb.com', 'thetvdb.plexapp.com')) if 'thetvdb.com' in url else False) or ssl_open(url_thumbnail or url)
if file: status += ", Downloaded and Saved locally"; SaveFile(filename, file)
if file: metatype[ url ] = Proxy.Preview(file, sort_order=num) if url_thumbnail else Proxy.Media(file, sort_order=num) # or metatype[ url ] != proxy_item # proxy_item =
except Exception as e: Log.Info("common.metadata_download() - Exception: {}, url: '{}', filename: '{}'".format(e, url, filename)); return
downloaded[string] = downloaded[string] + 1
def cleanse_title(string):
""" Cleanse title and translate anidb '`'
"""
DeleteChars = ""
  ReplaceChars = maketrans("`:/*?-.,;_", " "*10) #~ both maketrans arguments must have the same length
  if len(string)<=len(String.StripDiacritics(string))+2: string = String.StripDiacritics(string) #else there are Japanese characters that would be scrubbed out
try: string2 = string.encode('ascii', 'replace') # Encode into Ascii, prevent: UnicodeDecodeError: 'utf8' codec can't decode bytes in position 13-14: invalid continuation byte
except: pass
else:
if not string2.count('?'): string=string2
while re.search(r'\([^\(\)]*?\)', string): string = re.sub(r'\([^\(\)]*?\)', ' ', string)
while re.search(r'\[[^\[\]]*?\]', string): string = re.sub(r'\[[^\[\]]*?\]', ' ', string) # string = "qwerty [asdf] zxcv [vbnm] ghjk [tyui]" > 'qwerty zxcv ghjk ', string = "qwerty [asdf zxcv [vbnm] ghjk tyui]" > 'qwerty '
return " ".join(str(unicodedata.normalize('NFC', unicode(string.lower()))).translate(ReplaceChars, DeleteChars).split()) # str needed for translate
def write_logs(media, movie, error_log, source, AniDBid, TVDBid):
""" HAMA - Load logs, add non-present entried then Write log files to Plug-in /Support/Data/com.plexapp.agents.hama/DataItems
"""
Log.Info("=== common.write_logs() ===".ljust(157, '='))
if source == 'anidb': source = 'AniDBid'
elif source == 'tvdb': source = 'TVDBid'
library = GetLibraryRootPath(GetMediaDir(media, movie))[0]
for char in list("\\/:*?<>|~;"):
if char in library: library = library.replace(char, '-')
### File lock ###
sleep_time_max = 10
for log in error_log:
sleep_time = 0
while log in netLocked and netLocked[log][0]:
time.sleep(1)
sleep_time += 1
if sleep_time > sleep_time_max:
Log.Error("Could not obtain the lock in {}sec & lock age is {}sec. Skipping log update.".format(sleep_time_max, int(time.time())-netLocked[1] if 1 in netLocked else "never"))
continue #break #netLock.acquire()
netLocked[log] = (True, int(time.time()))
### Load previous entries ###
Log.Info("{log:<{width}}: {content}".format(log=log, width=max(map(len, error_log)), content=str(error_log[log])))
error_log_array = {}
log_line_separator = "<br />\r\n"
error_log_file = os.path.join('_Logs', library+' - '+log+'.htm')
if Data.Exists(error_log_file):
for line in Data.Load(error_log_file).split(log_line_separator):
if "|" in line: error_log_array[line.split("|", 1)[0].strip()] = line.split("|", 1)[1].strip()
### Remove this serie entry ###
if not log in ["Missing Episodes", "Missing Specials"]: keys = ["AniDBid: "+AniDBid, "AniDBid: "+WEB_LINK % (ANIDB_SERIE_URL + AniDBid, AniDBid), "TVDBid: "+ TVDBid, "TVDBid: "+WEB_LINK % (TVDB_SERIE_URL + TVDBid, TVDBid)]
elif not movie and (len(media.seasons)>2 or max(map(int, media.seasons.keys()))>1): keys = ["TVDBid: %s" % (WEB_LINK % (TVDB_SERIE_URL + TVDBid, TVDBid) )]
else: keys = ["%s: %s" % (source, WEB_LINK % (ANIDB_SERIE_URL + AniDBid if source == "AniDBid" else TVDB_SERIE_URL + TVDBid, AniDBid if source == "AniDBid" else TVDBid) )]
deleted = []
for key in keys:
if key in error_log_array:
deleted.append(error_log_array[key])
del(error_log_array[key]) # remove entry, needs updating or removal...
if not deleted and not error_log[log]: netLocked[log] = (False, 0); continue # didn't delete anything, no entry to add, the only case when we skip
### Generate prefix, append to error_log_array and Save error_log_array ###
log_prefix = ''
if log == 'TVDB posters missing': log_prefix = "Series posters must be 680x1000 and be JPG format. They should not contain spoilers, nudity, or vulgarity. Please ensure they are of high quality with no watermarks, unrelated logos, and that they don't appear stretched." + log_line_separator
if log == 'Plex themes missing': log_prefix = WEB_LINK % ("https://plexapp.zendesk.com/hc/en-us/articles/201572843","Restrictions") + log_line_separator
for entry in error_log[log]: error_log_array[entry.split("|", 1)[0].strip()] = entry.split("|", 1)[1].strip() if len(entry.split("|", 1))>=2 else ""
try: Data.Save(error_log_file, log_prefix + log_line_separator.join(sorted([str(key)+" | "+str(error_log_array[key]) for key in error_log_array], key = lambda x: x.split("|",1)[1] if x.split("|",1)[1].strip().startswith("Title:") and not x.split("|",1)[1].strip().startswith("Title: ''") else int(re.sub(r"<[^<>]*>", "", x.split("|",1)[0]).strip().split()[1].strip("'")) )))
except Exception as e: Log.Error("Exception: '%s'" % e)
netLocked[log] = (False, 0)
def Other_Tags(media, movie, status): # Other_Tags(media, Dict(AniDB_dict, 'status') or Dict(TheTVDB_dict, 'status'))
""" Add genre tags: Status, Extension, Dubbed/Subbed
"""
tags = []
if movie: file = media.items[0].parts[0].file
else:
s = media.seasons.keys()[0] if media.seasons.keys()[0]!='0' else media.seasons.keys()[1] if len(media.seasons.keys()) >1 else None
if s:
e = media.seasons[s].episodes.keys()[0]
file = media.seasons[s].episodes[e].items[0].parts[0]
else: file = ''
### Status tag: #"Ended" or "Continuing", "" from:AniDB, TVDB ###
if status in ('Ended', 'Continuing'): tags.append(status)
if file:
### Extension tag ###
tags.append(str(os.path.splitext(file.file)[1].lstrip('.'))) # avoid u'ext'
    ### Tag Dubbed/Subbed ###
streams = {1:[], 2:[], 3:[]} #StreamTypes = {1: 'video', 2: 'audio', 3: 'subtitle'}
for stream in file.streams:
if stream.type in streams: streams[stream.type].append(stream.language if hasattr(stream, 'language') else "")
for audio in streams[2]:
if not streams[3]: tags.extend([audio + " Dubbed" for audio in streams[2]])
else: tags.extend([audio + " Subbed " + subtitle for audio in streams[2] for subtitle in streams[3]])
return tags
### Update meta field ###
def UpdateMetaField(metadata_root, metadata, meta_root, fieldList, field, source, movie, source_list):
if field not in meta_root: Log.Info('[!] field: "{}" not in meta_root, source: "{}"'.format(field, source)); return
if type(metadata).__name__=="tuple":
ep_string = ' new season: {:<2}, new_episode: {:<3}'.format(metadata[3], metadata[4])
metadata = metadata[0].seasons[metadata[1]].episodes[metadata[2]]
is_episode = True
else: ep_string, is_episode = "", False
meta_old = getattr(metadata, field) # getattr( metadata, field, None)
meta_new = meta_root[field]
meta_new_short = (meta_new[:80]).replace("\n", "\\n").replace("\r", "\\r")+'..' if isinstance(meta_new, basestring) and len(meta_new)> 80 else meta_new
MetaFieldList = ('directors', 'writers', 'producers', 'guest_stars', 'collections', 'genres', 'tags', 'countries')
MetaRoleList = ('directors', 'writers', 'producers', 'guest_stars', 'roles')
MetaIntList = ('year', 'absolute_number', 'duration')
### Prepare data for comparison ###
try:
if isinstance(meta_new, int):
if field == 'rating': meta_new = float(meta_new)
if isinstance(meta_new, basestring) or isinstance(meta_new, str):
if field == 'rating': meta_new = float(meta_new)
if field == 'title_sort': meta_new = SortTitle(meta_new)
if field == 'originally_available_at': meta_new = Datetime.ParseDate(meta_new).date()
if field in MetaIntList: meta_new = int(meta_new) if meta_new.isdigit() else None
if field in MetaFieldList:
meta_new = re.sub(r'\([^)]*\)', '', meta_new)
meta_new = meta_new.split(',' if ',' in meta_new else '|')
if isinstance(meta_new, list) and field in MetaRoleList:
meta_new = [{'role': Dict(obj, 'role'), 'name': Dict(obj, 'name'), 'photo': Dict(obj,'photo')} if isinstance(obj, dict) else \
{'role': None, 'name': obj, 'photo': None} for obj in meta_new]
except Exception as e: Log.Info("[!] 1{field:<23} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e))
try:
if not isinstance(meta_new, list): meta_old_value = meta_old
elif field in MetaRoleList: meta_old_value = [ {'role': role_obj.role, 'name': role_obj.name, 'photo': role_obj.photo} for role_obj in meta_old] #if role_obj.role]
else: meta_old_value = [x for x in meta_old] #meta_old_value = [ {'role': role_obj.role, 'name': role_obj.name, 'photo': role_obj.photo} for role_obj in meta_old]
except Exception as e: Log.Info("[!] 2{field:<23} Sources: {sources:<11} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e))
### Update ONLY IF REQUIRED ###
if '|' in Prefs[field]:
if metadata_root==metadata: sources = '|'.join([Prefs[field].split('|')[is_episode].replace(source, '('+source+')'), Prefs[field].split('|')[1]])
else: sources = '|'.join([Prefs[field].split('|')[is_episode], Prefs[field].split('|')[1].replace(source, '('+source+')')])
else: sources = Prefs[field].replace(source, '('+source+')')
if isinstance(meta_new, dict) and field=='posters': Log.Info('[?] meta_new: {}\n meta_old: {}'.format(DictString(meta_new, 1, 4), DictString(sorted(meta_old.keys(), key=natural_sort_key), 1, 4))) # Can't print meta_old values as plex custom class without a string print call
if meta_new == meta_old_value or field not in MetaRoleList and (isinstance(meta_new, dict) and set(meta_new.keys()).issubset(meta_old.keys()) or isinstance(meta_new, list) and set(meta_new)== set(meta_old)):
Log.Info("[=] {field:<23} {len:>4} Sources: {sources:<60} Inside: '{source_list}' Value: '{value}'".format(field=field, len="({:>2})".format(len(meta_root[field])) if isinstance(meta_root[field], (list, dict)) else "", sources=sources, value=meta_new_short, source_list=source_list))
else:
Log.Info("[x] {field:<23} {len:>4} Sources: {sources:<60} Inside: '{source_list}' Value: '{value}'".format(field=field, len="({:>2})".format(len(meta_root[field])) if isinstance(meta_root[field], (list, dict)) else "", sources=sources, value=meta_new_short, source_list=source_list))
if isinstance(meta_new, dict) and field in ['posters', 'banners', 'art', 'themes', 'thumbs']:
for url in meta_new:
if not url in meta_old and isinstance(meta_new[url], tuple): metadata_download(metadata_root, meta_old, url, meta_new[url][0], meta_new[url][1], meta_new[url][2])
elif isinstance(meta_new, list) and field in MetaRoleList:
try:
meta_old.clear()
for item in meta_new:
meta_role = meta_old.new()
if not isinstance(item, dict): setattr(meta_role, 'name', item) #list of names instead of list of people, but should already be list of people
else:
for field in item:
if item[field]: setattr(meta_role, field, item[field])
except Exception as e: Log.Info("[!] {field:<29} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e))
else:
try: setattr(metadata, field, meta_new) #Type: {format:<20} #format=type(meta_old).__name__+"/"+type(meta_new).__name__,
except Exception as e: Log.Info("[!] {field:<29} Sources: {sources:<60} Value: {value} Exception: {error}".format(field=field, sources=sources, value=meta_new_short, error=e))
def UpdateMeta(metadata, media, movie, MetaSources, mappingList):
""" Update all metadata from a list of Dict according to set priorities
"""
Log.Info("=== common.UpdateMeta() ===".ljust(157, '='))
# Display source field table
Log.Info("Fields in Metadata Sources per movie/serie, season, episodes")
for source in MetaSources:
if MetaSources[source]: Log.Info("- {source:<11} : {fields}".format(source=source, fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '', field, len(MetaSources[source][field]) if isinstance(MetaSources[source][field], (list, dict)) else 1) for i, field in enumerate(MetaSources[source]))))
if type(MetaSources[source]).__name__ == 'NoneType': Log.Info("[!] source: '%s', type: '%s', bad return in function, should return an empty dict" % (source, type(MetaSources[source]).__name__))
if 'seasons' in (MetaSources[source] if MetaSources[source] else {}) :
season_fields, episode_fields, ep_nb, ep_invalid = {}, {}, 0, 0
for season in sorted(MetaSources[source]['seasons'], key=natural_sort_key):
for field in MetaSources[source]['seasons'][season]:
if field in FieldListSeasons: season_fields[field] = (season_fields[field] + 1) if field in season_fields else 1
elif not field=="episodes" and not field.isdigit(): Log.Info("Season Field Unrecognised: '{}' in source: '{}'".format(field, source))
for episode in sorted(MetaSources[source]['seasons'][season]['episodes'], key=natural_sort_key) if 'episodes' in MetaSources[source]['seasons'][season] else []:
for field in MetaSources[source]['seasons'][season]['episodes'][episode]:
if field in FieldListEpisodes: episode_fields[field] = episode_fields[field] + 1 if field in episode_fields else 1
            elif field != 'language_rank': Log.Info("    {:<23} Season {:>3}, Episode: {:>3} is not a valid metadata field, value: '{!s}'".format(field, season, episode, MetaSources[source]['seasons'][season]['episodes'][episode][field])); ep_invalid+=1
ep_nb+=1
if len(season_fields ): Log.Info(" - Seasons ({nb:>3}): {fields}".format(nb=len(MetaSources[source]['seasons']), fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '',field, season_fields[field]) for i, field in enumerate(season_fields))))
if len(episode_fields): Log.Info(" - Episodes ({nb:>3}): {fields}".format(nb=ep_nb-ep_invalid , fields=' | '.join('{}{:<23} ({:>3})'.format('\n ' if i%5==0 and i>0 else '',field, episode_fields[field]) for i, field in enumerate(episode_fields))))
Log.Info("".ljust(157, '-'))
#if AniDB_dict['originally_available_at']: AniDB_dict['year'] = AniDB_dict['originally_available_at'].year
### Metadata review display. Legend for the '[ ]' display:
# [=] already at the right value for that source
# [x] Xst/nd/th source had the field
# [#] no source for that field
# [!] Error assigning
#Update engine
Log.Info("Metadata Fields (items #), type, source provider, value")
count = {'posters':0, 'art':0, 'thumbs':0, 'banners':0, 'themes':0}
languages = Prefs['EpisodeLanguagePriority'].replace(' ', '').split(',')
#posters=[]
#fields = metadata.attrs.keys()
#if 'seasons' in fields: fields.remove('seasons')
for field in FieldListMovies if movie else FieldListSeries:
meta_old = getattr(metadata, field)
    if field in ('posters', 'banners', 'art'): meta_old.validate_keys([]) #This will allow the images to get re-added at the correct priority level if preferences are updated and meta is refreshed
source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, field) ]
language_rank, language_source = len(languages)+1, None
for source in [source.strip() for source in (Prefs[field].split('|')[0] if '|' in Prefs[field] else Prefs[field]).split(',') if Prefs[field]]:
if source in MetaSources:
#For AniDB assigned series will favor AniDB summary even if TheTVDB is before in the source order for summary fields IF the anidb series is not mapped to TheTVDB season 1.
if Dict(MetaSources, source, field):
if field=='genres'and ('|' in MetaSources[source]['genres'] or ',' in MetaSources[source]['genres']):
MetaSources[source]['genres'] = MetaSources[source]['genres'].split('|' if '|' in MetaSources[source]['genres'] else ',')
MetaSources[source]['genres'].extend( Other_Tags(media, movie, Dict(MetaSources, 'AniDB', 'status')) )
if field=='title':
title, rank = Dict(MetaSources, source, 'title'), Dict(MetaSources, source, 'language_rank')
if rank in (None, ''): rank = len(languages)
if rank<language_rank: MetaSources[source]['title_sort'], language_rank, language_source = SortTitle(title, IsIndex(languages, rank)), rank, source
else: UpdateMetaField(metadata, metadata, MetaSources[source], FieldListMovies if movie else FieldListSeries, field, source, movie, source_list)
if field in count: count[field] = count[field] + 1
if field!='title' and (field not in ['posters', 'art', 'banners', 'themes', 'thumbs', 'title']): break
elif not source=="None": Log.Info("[!] '{}' source not in MetaSources dict, please Check case and spelling".format(source))
else:
if field=='title': UpdateMetaField(metadata, metadata, Dict(MetaSources, language_source, default={}), FieldListMovies if movie else FieldListSeries, 'title', language_source, movie, source_list) #titles have multiple assignments, adding only once otherwise duplicated field outputs in logs
elif not Dict(count, field) and Prefs[field]!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list} Values: {values}".format(field=field, sources='' if field=='season' else Prefs[field], source_list=source_list, values=Dict(MetaSources, source, field)))
#if field=='posters': metadata.thumbs.validate_keys(meta_new.keys())
if not movie:
### AniDB poster as season poster backup ###
#if (metadata.id.startswith("tvdb") or max(map(int, media.seasons.keys())) >1) and Dict(mappingList, 'defaulttvdbseason'): # defaulttvdb season isdigit and assigned to 1 tvdb season (even if it is season 0)
# if Dict(MetaSources, 'AniDB', 'posters'): SaveDict(MetaSources['AniDB']['posters'], MetaSources, 'AniDB', 'seasons', Dict(mappingList, 'defaulttvdbseason') if Dict(mappingList, 'defaulttvdbseason').isdigit() else '1', 'posters')
# if Dict(MetaSources, 'AniDB', 'summary'): SaveDict(MetaSources['AniDB']['summary'], MetaSources, 'AniDB', 'seasons', Dict(mappingList, 'defaulttvdbseason') if Dict(mappingList, 'defaulttvdbseason').isdigit() else '1', 'summary')
### Seasons ###
#languages = Prefs['SerieLanguagePriority'].replace(' ', '').split(',')
#count = {'posters':0, 'art':0}
count = {'posters':0, 'art':0, 'thumbs':0, 'banners':0, 'themes':0} #@task #def UpdateEpisodes(metadata=metadata, MetaSources=MetaSources, count=count, season=season, episode=episode, cached_logs=cached_logs):
cached_logs = {}
#@parallelize
#def addMeta():
season_posters_list = []
for season in sorted(media.seasons, key=natural_sort_key): # For each season, media, then use metadata['season'][season]...
Log.Info(("metadata.seasons[{:>2}]".format(season)).ljust(157, '-'))
source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, 'seasons', season, field) ]
new_season = season
for field in FieldListSeasons: #metadata.seasons[season].attrs.keys()
meta_old = getattr(metadata.seasons[season], field)
        if field in ('posters', 'banners', 'art'): meta_old.validate_keys([]) #This will allow the images to get re-added at the correct priority level if preferences are updated and meta is refreshed
for source in [source.strip() for source in Prefs[field].split(',') if Prefs[field]]:
if source in MetaSources:
if Dict(MetaSources, source, 'seasons', season, field) or metadata.id.startswith('tvdb4'):
if field=='posters': season_posters_list.extend(Dict(MetaSources, source, 'seasons', season, 'posters', default={}).keys())
UpdateMetaField(metadata, metadata.seasons[season], Dict(MetaSources, source, 'seasons', season), FieldListSeasons, field, source, movie, source_list)
if field in count: count[field] = count[field] + 1
if field not in ['posters', 'art']: break
elif not source=="None": Log.Info("[!] {} Sources: '{}' not in MetaSources".format(field, source))
else:
if not Dict(count, field) and Dict(Prefs, field)!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list}".format(field=field, sources='' if field=='seasons' else Prefs[field], source_list=source_list))
### Episodes ###
languages = Prefs['EpisodeLanguagePriority'].replace(' ', '').split(',')
for episode in sorted(media.seasons[season].episodes, key=natural_sort_key):
Log.Info("metadata.seasons[{:>2}].episodes[{:>3}]".format(season, episode))
new_season, new_episode = season, episode
source_title, title, rank = '', '', len(languages)+1
for field in FieldListEpisodes: # metadata.seasons[season].episodes[episode].attrs.keys()
meta_old = getattr(metadata.seasons[season].episodes[episode], field)
source_list = [ source_ for source_ in MetaSources if Dict(MetaSources, source_, 'seasons', new_season, 'episodes', new_episode, field) ]
for source in [source_.strip() for source_ in (Prefs[field].split('|')[1] if '|' in Prefs[field] else Prefs[field]).split(',')]: #if shared by title and eps take later priority
if source in MetaSources:
if Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, field):
if field=='title':
language_rank = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'language_rank')
if language_rank not in ('', None) and language_rank < rank or len(languages)< rank: #Manage title language for AniDB and TheTVDB by recording the rank
source_title = source
title = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'title' )
rank = Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode, 'language_rank')
Log.Info('[?] rank: {:>1}, source_title: {:>7}, title: "{}"'.format(rank, source_title, title))
else:
Log.Info('[!] title: {}, language_rank {}, rank: {}, len(languages): "{}"'.format(title, language_rank, rank, len(languages)))
else:
UpdateMetaField(metadata, (metadata, season, episode, new_season, new_episode), Dict(MetaSources, source, 'seasons', new_season, 'episodes', new_episode), FieldListEpisodes, field, source, movie, source_list)
if field in count: count[field] = count[field] + 1
if field!='title' and (field not in ['posters', 'art', 'banners', 'themes', 'thumbs', 'title']): break
elif not source=="None": Log.Info("[!] '{}' source not in MetaSources dict, please Check case and spelling".format(source))
else:
if field=='title' and source_title: UpdateMetaField(metadata, (metadata, season, episode, new_season, new_episode), Dict(MetaSources, source_title, 'seasons', new_season, 'episodes', new_episode), FieldListEpisodes, field, source_title, movie, source_list)
elif not Dict(count, field) and field!='seasons' and Prefs[field]!="None" and source_list: Log.Info("[#] {field:<29} Sources: {sources:<60} Inside: {source_list}".format(field=field, sources='' if field=='seasons' else Prefs[field], source_list=source_list))
if field=='thumbs': metadata.seasons[season].episodes[episode].thumbs.validate_keys(meta_new.keys())
# End Of for field
# End Of for episode
else: metadata.seasons[season].posters.validate_keys(season_posters_list)
# End of for season
Log.Info("".ljust(157, '-'))
global downloaded; downloaded = {'posters':0, 'art':0, 'seasons':0, 'banners':0, 'themes':0, 'thumbs': 0}
def SortTitle(title, language="en"):
""" SortTitle
"""
dict_sort = { 'en': ["The", "A", "An"],
'fr': ["Le", "La", "Les", "L", "Un", "Une ", "Des "],
'sp': ["El", "La", "Las", "Lo", "Los", "Uno ", "Una "]
}
title = title.replace("'", " ")
prefix = title.split (" ", 1)[0] #Log.Info("SortTitle - title:{}, language:{}, prefix:{}".format(title, language, prefix))
return title.replace(prefix+" ", "", 1) if language in dict_sort and prefix in dict_sort[language] else title
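# Illustrative: SortTitle("The Example Show", "en") -> "Example Show" (leading article removed),
# while SortTitle("Example Show", "en") is returned unchanged.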
def poster_rank(source, image_type, language='en', rank_adjustment=0):
"""
{ "id": "PosterLanguagePriority", "label": "TheTVDB Poster Language Priority", "type": "text", "default": ... },
{ "id": "posters", "label": "TS-M 'poster'", "type": "text", "default": ... },
{ "id": "art", "label": "T--M 'art'", "type": "text", "default": ... },
{ "id": "banners", "label": "TS-- 'banners'", "type": "text", "default": ... },
"""
max_rank = 100
if image_type == 'seasons': image_type = 'posters'
language_posters = [language.strip() for language in Prefs['PosterLanguagePriority'].split(',')]
priority_posters = [provider.strip() for provider in Prefs[image_type ].split(',')]
lp_len = len(language_posters)
pp_len = len(priority_posters)
lp_pos = language_posters.index(language) if language in language_posters else lp_len
pp_pos = priority_posters.index(source) if source in priority_posters else pp_len
lp_block_size = max_rank/lp_len
pp_block_size = lp_block_size/pp_len
rank = (lp_pos*lp_block_size)+(pp_pos*pp_block_size)+1+rank_adjustment
if rank > 100: rank = 100
if rank < 1: rank = 1
#Log.Info(" - language: {:<10}, lp_pos: {}, lp_block_size: {}, language_posters: {}".format(language, lp_pos, lp_block_size, language_posters))
#Log.Info(" - source: {:<10}, pp_pos: {}, pp_block_size: {}, priority_posters: {}".format(source, pp_pos, pp_block_size, priority_posters))
#Log.Info(" - image_type: {}, rank: {}".format(image_type, rank))
return rank
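# Worked example (hypothetical preference values): with PosterLanguagePriority = "en, ja" and a 4-provider
# 'posters' priority list, lp_block_size = 100/2 = 50 and pp_block_size = 50/4 = 12 (integer division),
# so an 'en' image (lp_pos 0) coming from the 2nd provider (pp_pos 1) gets rank 0*50 + 1*12 + 1 = 13;
# the final clamps keep every rank inside the 1..100 range.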
|
gpl-3.0
| 8,189,609,514,421,203,000
| 68.007491
| 382
| 0.627191
| false
| 3.468344
| false
| false
| false
|
flynx/pli
|
pli/pattern/proxy/_callproxy.py
|
1
|
9071
|
#=======================================================================
#=======================================================================
__version__ = '''0.1.00'''
__sub_version__ = '''20040223152229'''
__copyright__ = '''(c) Alex A. Naanou 2003'''
#-----------------------------------------------------------------------
##import sys
##import new
##import types
##import weakref
import operator
from pli.functional import *
#-----------------------------------------------------------------------
#
# WARNING: this is not yet complete!!!
#
# NOTE: this works about twice as fast as the *clever* version.
##!! BUG: this appears not to work with the % operator (e.g. '%d' % pobj)
class callproxy(object):
'''
this is a dumb callproxy.
'''
__slots__ = ['p_obj', 'p_queue', 'p_cache', 'p_drop_refs', 'p_callback', 'p_safe', '__weakref__']
def __init__(self, obj, queue=None, cache=None, drop_refs=0, callback=None, safe=1):
# do some correctness checks
if safe:
# callback test
if callback != None:
if not callable(callback):
raise TypeError, 'callback object must be callable.'
elif queue == None:
raise TypeError, 'one of either callback or queue objects must be specified.'
# test if queue supports append
elif not hasattr(queue, 'append'):
raise TypeError, 'queue object must have an "append" method.'
# test if this supports dict interface
if cache != None and (not hasattr(cache, '__setitem__') or not hasattr(cache, '__getitem__') or not hasattr(cache, 'keys')):
raise TypeError, 'cache object must support "__setitem__", "__getitem__" and "keys" methods'
## # if this is true there is no point in this in the first place!
## elif callback == None and queue == None:
## raise TypeError, 'one of either callback or queue objects must be specified.'
osetattr = object.__setattr__
osetattr(self, 'p_obj', obj)
osetattr(self, 'p_queue', queue)
osetattr(self, 'p_cache', cache)
osetattr(self, 'p_drop_refs', drop_refs)
osetattr(self, 'p_callback', callback)
osetattr(self, 'p_safe', safe)
def __getattr__(self, name):
target = getattr(self.p_obj, name)
if self.p_cache != None and hasattr(self.p_cache, 'update'):
if target in self.p_cache.keys():
return self.p_cache[target]
else:
pobj = callproxy(target, self.p_queue, self.p_cache, self.p_drop_refs, self.p_callback, self.p_safe)
self.p_cache.update({target: pobj})
return pobj
return self.__class__(target, self.p_queue, self.p_cache, self.p_drop_refs, self.p_callback, self.p_safe)
def __call__(self, *p, **n):
# check if callable...
if not callable(self.p_obj):
			raise TypeError('proxied object is not callable: %r' % (self.p_obj,))  # fail early with a clear error instead of calling a non-callable
if self.p_queue != None:
# queue the call
self.p_queue.append(curry(self.p_obj, *p, **n))
# do the callback.
if self.p_callback != None:
return self.p_callback(*(self.p_obj,) + p, **n)
return None
elif self.p_callback != None:
return self.p_callback(*(self.p_obj,) + p, **n)
# we get here if safe is False...
# WARNING: this is currently incompatible with the python version!
return self.p_obj(*p, **n)
def __setattr__(self, name, val):
setattr(self.p_obj, name, val)
def __delattr__(self, name):
delattr(self.p_obj, name)
def __repr__(self):
return '<callproxy at %s to %.100s at %s>' % (hex(id(self)), self.p_obj.__class__.__name__, hex(id(self.p_obj)))
def __str__(self):
return str(self.p_obj)
def __iter__(self):
return self.p_obj.__iter__()
def __hash__(self):
return hash(self.p_obj)
def __nonzero__(self):
if hasattr(self.p_obj, '__nonzero__'):
return self.p_obj.__nonzero__()
elif hasattr(self.p_obj, '__len__'):
return len(self.p_obj)
else:
return 1
def __len__(self):
return len(self.p_obj)
def __unicode__(self):
return self.p_obj.__unicode__()
def __cmp__(self, other):
# get the original type if the other side is callproxy
if isinstance(other, callproxy):
other = other.p_obj
return cmp(self.p_obj, other)
## def __lt__(self, other):
## return self.p_obj.__lt__(other)
## def __le__(self, other):
## return self.p_obj.__le__(other)
## def __eq__(self, other):
## return self.p_obj.__eq__(other)
## def __ne__(self, other):
## return self.p_obj.__ne__(other)
## def __gt__(self, other):
## return self.p_obj.__gt__(other)
## def __ge__(self, other):
## return self.p_obj.__ge__(other)
# number interface...
# NOTE: if you have the strength to read this section, be my guest!
# (I did not even have the strength to write it.. :) )
def __add__(x, y):
if isinstance(x, callproxy):
x = (x).p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__add__(x, y)
def __sub__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__sub__(x, y)
def __mul__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__mul__(x, y)
def __floordiv__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__floordiv__(x, y)
def __truediv__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__truediv__(x, y)
def __div__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__div__(x, y)
def __mod__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__mod__(x, y)
def __divmod__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__divmod__(y)
def __pow__(x, y, z):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
if isinstance(z, callproxy):
z = z.p_obj
return x.__pow__(y, z)
def __neg__(self):
return operator.__neg__(self.p_obj)
def __pos__(self):
return operator.__pos__(self.p_obj)
def __abs__(self):
return operator.__abs__(self.p_obj)
def __invert__(self):
return operator.__invert__(self.p_obj)
def __lshift__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__lshift__(x, y)
def __rshift__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__rshift__(x, y)
def __and__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__and__(x, y)
def __xor__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__xor__(x, y)
def __or__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return operator.__or__(x, y)
def __int__(self):
return int(self.p_obj)
def __long__(self):
return long(self.p_obj)
def __float__(self):
return float(self.p_obj)
def __oct__(self):
return oct(self.p_obj)
def __hex__(self):
return hex(self.p_obj)
def __iadd__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__iadd__(y)
def __isub__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__isub__(y)
def __imul__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__imul__(y)
def __ifloordiv__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__ifloordiv__(y)
def __itruediv__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__itruediv__(y)
def __idiv__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__idiv__(y)
def __imod__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__imod__(y)
def __ipow__(x, y, z):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
if isinstance(z, callproxy):
z = z.p_obj
return x.__ipow__(y, z)
def __ilshift__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__ilshift__(y)
def __irshift__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__irshift__(y)
def __iand__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__iand__(y)
def __ixor__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__ixor__(y)
def __ior__(x, y):
if isinstance(x, callproxy):
x = x.p_obj
if isinstance(y, callproxy):
y = y.p_obj
return x.__ior__(y)
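# Hedged usage sketch (object and method names are illustrative), showing calls being queued instead of executed:
#   queue = []
#   proxy = callproxy(some_obj, queue=queue)   # attribute access returns further callproxy wrappers
#   proxy.method(1, 2)                         # nothing runs yet; a curried call is appended to queue
#   queue.pop(0)()                             # now some_obj.method(1, 2) actually executes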
##!!!
#=======================================================================
# vim:set ts=4 sw=4 nowrap :
|
bsd-3-clause
| 6,048,693,589,287,850,000
| 27.615142
| 127
| 0.578216
| false
| 2.748788
| false
| false
| false
|
djtotten/workbench
|
setup.py
|
1
|
1964
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from setuptools import setup
readme = open('README.rst').read()
long_description = readme
doclink = '''
Documentation
-------------
The full documentation is at http://workbench.rtfd.org. '''
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
exec(open('workbench/server/version.py').read())
setup(
name='workbench',
version=__version__,
description='A scalable framework for security research and development teams.',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='The Workbench Team',
author_email='support@supercowpowers.com',
url='http://github.com/SuperCowPowers/workbench',
packages=['workbench', 'workbench.server',
'workbench.server.bro', 'workbench.workers',
'workbench.workers.rekall_adapter',
'workbench.clients', 'workbench_apps', 'workbench_apps.workbench_cli'],
package_dir={'workbench': 'workbench', 'workbench_apps': 'workbench_apps'},
include_package_data=True,
scripts=['workbench/server/workbench_server', 'workbench_apps/workbench_cli/workbench'],
tests_require=['tox'],
install_requires=['cython', 'distorm3', 'elasticsearch', 'funcsigs', 'flask', 'filemagic',
'ipython', 'lz4', 'mock', 'numpy', 'pandas', 'pefile',
'py2neo==1.6.4', 'pymongo', 'pytest', 'rekall==1.0.3', 'requests',
'ssdeep==2.9-0.3', 'urllib3', 'yara', 'zerorpc', 'cython'],
license='MIT',
zip_safe=False,
keywords='workbench security python',
classifiers=[
'Topic :: Security',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Unix',
'Natural Language :: English',
'Programming Language :: Python :: 2.7'
]
)
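# Hedged note: with this setup.py a typical developer install would be something along the lines of
# "pip install -e ." from the repository root, after which the scripts listed above
# ('workbench_server' and 'workbench') are placed on the PATH of the active environment.
# The exact workflow is an assumption and is not prescribed by this file.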
|
mit
| -9,168,152,055,158,455,000
| 37.509804
| 95
| 0.611507
| false
| 3.664179
| false
| false
| false
|
heeraj123/oh-mainline
|
mysite/missions/svn/views.py
|
1
|
7651
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010 John Stumpo
# Copyright (C) 2011 Krzysztof Tarnowski (krzysztof.tarnowski@ymail.com)
# Copyright (C) 2010, 2011 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shutil
import tempfile
import json
from django.shortcuts import render
from mysite.missions.base.views import *
from mysite.missions.svn import forms, view_helpers
# POST handlers
# Helper functions for form submissions. These functions are used to validate
# input and/or modify the stored user information about missions, such as
# recording that a mission was successfully completed.
@login_required
def resetrepo(request):
""" Reset a user's mission repository and mark steps as uncompleted. """
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
view_helpers.SvnRepository(request.user.username).reset()
view_helpers.unset_mission_completed(request.user.get_profile(),
'svn_checkout')
view_helpers.unset_mission_completed(request.user.get_profile(),
'svn_diff')
view_helpers.unset_mission_completed(request.user.get_profile(),
'svn_commit')
if 'stay_on_this_page' in request.GET:
return HttpResponseRedirect(reverse('svn_main_page'))
else:
return HttpResponseRedirect(reverse('svn_checkout'))
@login_required
def diff_submit(request):
""" Handle submitting the results of an svn diff to the mission """
data = {}
data['svn_diff_form'] = forms.DiffForm(request.user.username)
data['svn_diff_error_message'] = ''
if request.method == 'POST':
temp_svn_directory = tempfile.mkdtemp()
form = forms.DiffForm(request.user.username, temp_svn_directory,
request.POST)
if form.is_valid():
try:
form.commit_diff()
view_helpers.set_mission_completed(request.user.get_profile(),
'svn_diff')
return HttpResponseRedirect(reverse('svn_diff'))
finally:
shutil.rmtree(temp_svn_directory)
shutil.rmtree(temp_svn_directory)
data['svn_diff_form'] = form
# If we get here, just hack up the request object to pretend it is a GET
# so the dispatch system in the class-based view can use the GET handler.
request.method = 'GET'
return Diff.as_view()(request, extra_context_data=data)
@login_required
def checkout_submit(request):
""" Handle svn checkout mission step form and completion """
data = {}
data['svn_checkout_form'] = forms.CheckoutForm(request.user.username)
data['svn_checkout_error_message'] = ''
if request.method == 'POST':
form = forms.CheckoutForm(request.user.username, request.POST)
if form.is_valid():
view_helpers.set_mission_completed(request.user.get_profile(),
'svn_checkout')
return HttpResponseRedirect(reverse('svn_checkout'))
data['svn_checkout_form'] = form
# If we get here, just hack up the request object to pretend it is a GET
# so the dispatch system in the class-based view can use the GET handler.
request.method = 'GET'
return Checkout.as_view()(request, extra_context_data=data)
class SvnBaseView(mysite.missions.base.views.MissionBaseView):
"""
A base class for a view of an SVN mission step.
SVNBaseView is subclassed to provide GET handler classes to help with
views of each mission step.
"""
mission_name = 'Using Subversion'
def get_context_data(self, *args, **kwargs):
# For now, we use the MissionPageState object to track a few things.
# Eventually, the missions base will stop using the PageState object,
# and all the work that class does will get merged into
# MissionBaseView.
data = super(SvnBaseView, self).get_context_data(*args, **kwargs)
state = MissionPageState(
self.request, passed_data=None, mission_name=self.mission_name)
new_data, person = state.get_base_data_dict_and_person()
if person:
repo = view_helpers.SvnRepository(self.request.user.username)
new_data.update({
'repository_exists': repo.exists(),
'svn_checkout_done': view_helpers.mission_completed(person, 'svn_checkout'),
'svn_diff_done': view_helpers.mission_completed(person, 'svn_diff'),
'svn_commit_done': view_helpers.mission_completed(person, 'svn_commit'),
})
if new_data['repository_exists']:
new_data.update({
'checkout_url': repo.public_trunk_url(),
'secret_word_file': forms.CheckoutForm.SECRET_WORD_FILE,
'file_for_svn_diff': forms.DiffForm.FILE_TO_BE_PATCHED,
'new_secret_word': view_helpers.SvnCommitMission.NEW_SECRET_WORD,
'commit_username': self.request.user.username,
'commit_password': repo.get_password()})
data.update(new_data)
return data
# Normal GET handlers. These are usually pretty short. They are based on
# SvnBaseView.
class MainPage(SvnBaseView):
""" Main start page of the SVN mission """
this_mission_page_short_name = 'Start page'
template_name = 'missions/svn/main_page.html'
class LongDescription(SvnBaseView):
""" Page with detailed information on SVN """
this_mission_page_short_name = 'About Subversion'
template_name = 'missions/svn/about_svn.html'
class Checkout(SvnBaseView):
""" Checkout step of SVN mission """
login_required = True
this_mission_page_short_name = 'Checking out'
template_name = 'missions/svn/checkout.html'
def get_context_data(self, *args, **kwargs):
data = super(Checkout, self).get_context_data(*args, **kwargs)
if kwargs.has_key('extra_context_data'):
data.update(kwargs['extra_context_data'])
else:
data['svn_checkout_form'] = forms.CheckoutForm()
return data
class Diff(SvnBaseView):
""" Diff step of the SVN mission """
login_required = True
this_mission_page_short_name = 'Diffing your changes'
mission_step_prerequisite = 'svn_checkout'
template_name = 'missions/svn/diff.html'
def get_context_data(self, *args, **kwargs):
data = super(Diff, self).get_context_data(*args, **kwargs)
if kwargs.has_key('extra_context_data'):
data.update(kwargs['extra_context_data'])
return data
class Commit(SvnBaseView):
""" Committing changes step of SVN mission"""
login_required = True
this_mission_page_short_name = 'Committing your changes'
mission_step_prerequisite = 'svn_diff'
template_name = 'missions/svn/commit.html'
@login_required
def commit_poll(request):
""" Determines if entire mission is completed """
return HttpResponse(json.dumps(view_helpers.mission_completed(request.user.get_profile(), 'svn_commit')))
|
agpl-3.0
| -5,017,947,890,406,898,000
| 39.696809
| 109
| 0.665534
| false
| 3.868049
| false
| false
| false
|
saullocastro/structMan
|
structmanager/sas.py
|
2
|
2784
|
"""
Structural Assemblies - SAs (:mod:`structmanager.sas`)
======================================================
.. currentmodule:: structmanager.sas
"""
class FrameAssembly(object):
"""Frame Assembly"""
def __init__(self, name, args):
        outerflange, web, innerflange = args
self.name = name
self.outerflange = outerflange
self.web = web
self.innerflange = innerflange
def __str__(self):
return ('FrameAssembly: ' + self.name +
'\n-' + str(self.outerflange) +
'\n-' + str(self.web) +
'\n-' + str(self.innerflange)
)
def __repr__(self):
return str(self)
class FrameShearClipAssembly(object):
"""Frame Assembly with Shear Clip"""
def __init__(self, name, args):
shearclipskin, shearclipframe, outerflange, web, innerflange = args
self.name = name
self.shearclipskin = shearclipskin
self.shearclipframe = shearclipframe
self.outerflange = outerflange
self.web = web
self.innerflange = innerflange
def __str__(self):
return ('FrameShearClipAssembly: ' + self.name +
'\n-' + str(self.shearclipskin) +
'\n-' + str(self.shearclipframe) +
'\n-' + str(self.outerflange) +
'\n-' + str(self.web) +
'\n-' + str(self.innerflange)
)
def __repr__(self):
return str(self)
class StiffenedPanelAssembly(object):
"""Stiffened Panel Assembly"""
def __init__(self, name, args):
panel, fr1, fr2, str1, str2 = args
self.name = name
self.panel = panel
self.fr1 = fr1
self.fr2 = fr2
self.str1 = str1
self.str2 = str2
def __str__(self):
return ('Stiffened Panel Assembly: ' + self.name +
'\n-' + str(self.panel) +
'\n-' + str(self.fr1) +
'\n-' + str(self.fr2) +
'\n-' + str(self.str1) +
'\n-' + str(self.str2))
def __repr__(self):
return str(self)
class StiffenedPanelCutout(object):
"""Stiffened Panel Cutout"""
def __init__(self, name, args):
panelcutout, str1, str2 = args
self.name = name
self.panelcutout = panelcutout
self.str1 = str1
self.str2 = str2
def __str__(self):
return ('Stiffened Panel Cutout: ' + self.name +
'\n-' + str(self.panelcutout) +
'\n-' + str(self.str1) +
'\n-' + str(self.str2))
def __repr__(self):
return str(self)
sa_classes = [
FrameAssembly,
FrameShearClipAssembly,
StiffenedPanelAssembly,
StiffenedPanelCutout,
]
|
bsd-3-clause
| 789,567,808,038,623,600
| 27.408163
| 75
| 0.510057
| false
| 3.532995
| false
| false
| false
|
fumitoh/modelx
|
modelx/tests/testdata/pandas_compat/fastlife/Projection/Policy/__init__.py
|
1
|
10940
|
"""Source module to create ``Policy`` space from.
This module is a source module to create ``Policy`` space and its
sub spaces from.
The formulas of the cells in the ``Policy`` space are created from the
functions defined in this module.
The ``Policy`` space is the base space of the policy spaces
for individual policies, which are derived from and belong to
the ``Policy`` space as its dynamic child spaces.
The policy spaces for individual policies are parametrized by ``PolicyID``.
For example, to get the policy space of the policy whose ID is 171::
    >>> pol = model.Policy(171)
The cells in a policy space for each individual policy retrieve
input data, calculate and hold values of policy attributes specific to that policy,
so various spaces in :mod:`Input<simplelife.build_input>` must be accessible
from the ``Policy`` space.
.. rubric:: Projects
This module is included in the following projects.
* :mod:`simplelife`
* :mod:`nestedlife`
* :mod:`ifrs17sim`
* :mod:`solvency2`
.. rubric:: Space Parameters
Attributes:
PolicyID: Policy ID
.. rubric:: References in Base
Attributes:
PolicyData: Input.PolicyData
ProductSpec: Input.ProductSpec
LifeTable: LifeTable
Gen: Generation key
.. rubric:: References in Sub
Attributes:
Product: Product key
PolicyType: Policy type key
Gen: Generation key
Channel: Channel key
Sex: ``M`` for Male, ``F`` for Female
Duration: Number of years lapsed. 0 for new business
IssueAge: Issue age
PremFreq: Number of premium payments per year. 12 for monthly payments
PolicyTerm: Policy term in year
PolicyCount: Number of policies
SumAssured: Sum Assured per policy
"""
from modelx.serialize.jsonvalues import *
_formula = None
_bases = []
_allow_none = None
_spaces = []
# ---------------------------------------------------------------------------
# Cells
def AnnPremRate():
"""Annualized Premium Rate per Sum Assured"""
return GrossPremRate() * PremFreq().where(PremFreq() != 0, other=1/10)
def CashValueRate(t):
"""Cash Value Rate per Sum Assured"""
return np.maximum(ReserveNLP_Rate('PREM', t) - SurrCharge(t), 0)
def GrossPremRate():
"""Gross Premium Rate per Sum Assured per payment"""
data = pd.concat([PolicyData,
LoadAcqSA(),
LoadMaintPrem(),
LoadMaintPrem2(),
LoadMaintSA(),
LoadMaintSA2(),
IntRate('PREM'),
TableID('PREM')], axis=1)
def get_value(pol):
prod = pol['Product']
alpha = pol['LoadAcqSA']
beta = pol['LoadMaintPrem']
delta = pol['LoadMaintPrem2']
gamma = pol['LoadMaintSA']
gamma2 = pol['LoadMaintSA2']
freq = pol['PremFreq']
x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm']
comf = LifeTable[pol['Sex'], pol['IntRate_PREM'], pol['TableID_PREM']]
if prod == 'TERM' or prod == 'WL':
return (comf.Axn(x, n) + alpha + gamma * comf.AnnDuenx(x, n, freq)
+ gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / (1-beta-delta) / freq / comf.AnnDuenx(x, m, freq)
elif prod == 'ENDW':
return (comf.Exn(x, n) + comf.Axn(x, n) + alpha + gamma * comf.AnnDuenx(x, n, freq)
+ gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / (1-beta-delta) / freq / comf.AnnDuenx(x, m, freq)
else:
raise ValueError('invalid product')
result = data.apply(get_value, axis=1)
result.name = 'GrossPremRate'
return result
def GrossPremTable():
"""Gross premium table"""
return None
def InitSurrCharge():
"""Initial Surrender Charge Rate"""
def get_value(pol):
prod, polt, gen = pol['Product'], pol['PolicyType'], pol['Gen']
term = pol['PolicyTerm']
param1 = SpecLookup.match("SurrChargeParam1", prod, polt, gen).value
param2 = SpecLookup.match("SurrChargeParam2", prod, polt, gen).value
if param1 is None or param2 is None:
raise ValueError('SurrChargeParam not found')
return param1 + param2 * min(term / 10, 1)
result = PolicyData.apply(get_value, axis=1)
result.name = 'InitSurrCharge'
return result
def IntRate(RateBasis):
"""Interest Rate"""
if RateBasis == 'PREM':
basis = 'IntRatePrem'
elif RateBasis == 'VAL':
basis = 'IntRateVal'
else:
raise ValueError('invalid RateBasis')
def get_value(pol):
result = SpecLookup.match(basis,
pol["Product"],
pol["PolicyType"],
pol["Gen"]).value
if result is not None:
return result
else:
raise ValueError('lookup failed')
result = PolicyData.apply(get_value, axis=1)
result.name = 'IntRate_' + RateBasis
return result
def LoadAcqSA():
"""Acquisition Loading per Sum Assured"""
param1 = Product().apply(lambda prod: SpecLookup("LoadAcqSAParam1", prod))
param2 = Product().apply(lambda prod: SpecLookup("LoadAcqSAParam2", prod))
result = param1 + param2 * np.minimum(PolicyTerm() / 10, 1)
result.name = 'LoadAcqSA'
return result
def LoadMaintPrem():
"""Maintenance Loading per Gross Premium"""
def get_value(pol):
if SpecLookup("LoadMaintPremParam1", pol["Product"]) is not None:
return SpecLookup("LoadMaintPremParam1", pol["Product"])
elif SpecLookup("LoadMaintPremParam2", pol["Product"]) is not None:
param = SpecLookup("LoadMaintPremParam2", pol["Product"])
return (param + min(10, pol["PolicyTerm"])) / 100
else:
raise ValueError('LoadMaintPrem parameters not found')
result = PolicyData.apply(get_value, axis=1)
result.name = 'LoadMaintPrem'
return result
def LoadMaintSA():
"""Maintenance Loading per Sum Assured during Premium Payment"""
def get_value(pol):
result = SpecLookup.match("LoadMaintSA",
pol["Product"],
pol["PolicyType"],
pol["Gen"]).value
if result is not None:
return result
else:
raise ValueError('lookup failed')
result = PolicyData.apply(get_value, axis=1)
result.name = 'LoadMaintSA'
return result
def LoadMaintSA2():
"""Maintenance Loading per Sum Assured after Premium Payment"""
def get_value(pol):
result = SpecLookup.match("LoadMaintSA2",
pol["Product"],
pol["PolicyType"],
pol["Gen"]).value
if result is not None:
return result
else:
raise ValueError('lookup failed')
result = PolicyData.apply(get_value, axis=1)
result.name = 'LoadMaintSA2'
return result
def NetPremRate(basis):
"""Net Premium Rate"""
data = pd.concat([PolicyData,
LoadMaintSA2(),
IntRate(basis),
TableID(basis)], axis=1)
def get_value(pol):
prod = pol['Product']
gamma2 = pol['LoadMaintSA2']
x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm']
comf = LifeTable[pol['Sex'], pol['IntRate_' + basis], pol['TableID_' + basis]]
if prod == 'TERM' or prod == 'WL':
return (comf.Axn(x, n) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / comf.AnnDuenx(x, n)
elif prod == 'ENDW':
return (comf.Axn(x, n) + gamma2 * comf.AnnDuenx(x, n-m, 1, m)) / comf.AnnDuenx(x, n)
else:
raise ValueError('invalid product')
result = data.apply(get_value, axis=1)
result.name = 'NetPremRate_' + basis
return result
def ReserveNLP_Rate(basis, t):
"""Net level premium reserve rate"""
data = pd.concat([PolicyData,
LoadMaintSA2(),
IntRate(basis),
TableID(basis),
NetPremRate(basis)], axis=1)
def get_value(pol):
prod = pol['Product']
gamma2 = pol['LoadMaintSA2']
netp = pol['NetPremRate_' + basis]
x, n, m = pol['IssueAge'], pol['PolicyTerm'], pol['PolicyTerm']
lt = LifeTable[pol['Sex'], pol['IntRate_' + basis], pol['TableID_' + basis]]
if t <= m:
return lt.Axn(x+t, n-t) + (gamma2 * lt.AnnDuenx(x+t, n-m, 1, m-t)
- netp * lt.AnnDuenx(x+t, m-t))
elif t <=n:
return lt.Axn(x+t, n-t) + gamma2 * lt.AnnDuenx(x+t, n-m, 1, m-t)
else:
return 0
result = data.apply(get_value, axis=1)
result.name = 'ReserveNLP_Rate'
return result
def ReserveRate():
"""Valuation Reserve Rate per Sum Assured"""
return None
def SurrCharge(t):
"""Surrender Charge Rate per Sum Assured"""
m = PremTerm()
    return InitSurrCharge() * np.maximum((np.minimum(m, 10) - t) / np.minimum(m, 10), 0)
def TableID(RateBasis):
"""Mortality Table ID"""
if RateBasis == 'PREM':
basis = "MortTablePrem"
elif RateBasis == 'VAL':
basis = "MortTableVal"
else:
raise ValueError('invalid RateBasis')
def get_value(pol):
result = SpecLookup.match(basis,
pol["Product"],
pol["PolicyType"],
pol["Gen"]).value
if result is not None:
return result
else:
raise ValueError('lookup failed')
result = PolicyData.apply(get_value, axis=1)
result.name = 'TableID_' + RateBasis
return result
def UernPremRate():
"""Unearned Premium Rate"""
return None
Product = lambda: PolicyData['Product']
PolicyType = lambda: PolicyData['PolicyType']
Gen = lambda: PolicyData['Gen']
Channel = lambda: PolicyData['Channel']
Sex = lambda: PolicyData['Sex']
Duration = lambda: PolicyData['Duration']
IssueAge = lambda: PolicyData['IssueAge']
PremFreq = lambda: PolicyData['PremFreq']
PolicyTerm = lambda: PolicyData['PolicyTerm']
PolicyCount = lambda: PolicyData['PolicyCount']
SumAssured = lambda: PolicyData['SumAssured']
def LoadMaintPrem2():
"""Maintenance Loading per Gross Premium for Premium Waiver"""
result = pd.Series(0.002, index=PolicyData.index)
    result[PremTerm() < 10] = 0.001
    result[PremTerm() < 5] = 0.0005
result.name = 'LoadMaintPrem2'
return result
# ---------------------------------------------------------------------------
# References
LifeTable = ("Interface", ("...", "LifeTable"), "auto")
PolicyData = ("Pickle", 2310405372040)
SpecLookup = ("Interface", ("...", "Input", "SpecLookup"), "auto")
PremTerm = ("Interface", (".", "PolicyTerm"), "auto")
|
gpl-3.0
| -2,055,454,818,524,421,000
| 25.300481
| 111
| 0.577148
| false
| 3.583361
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/connectivity_parameters_py3.py
|
1
|
1429
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityParameters(Model):
"""Parameters that determine how the connectivity check will be performed.
All required parameters must be populated in order to send to Azure.
:param source: Required.
:type source: ~azure.mgmt.network.v2017_11_01.models.ConnectivitySource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2017_11_01.models.ConnectivityDestination
"""
_validation = {
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'ConnectivitySource'},
'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
}
def __init__(self, *, source, destination, **kwargs) -> None:
super(ConnectivityParameters, self).__init__(**kwargs)
self.source = source
self.destination = destination
|
mit
| -6,371,394,010,488,108,000
| 34.725
| 81
| 0.610917
| false
| 4.669935
| false
| false
| false
|
projectshift/shift-memory
|
setup.py
|
1
|
2472
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# ----------------------------------------------------------------------------
# Building
#
# Create source distribution:
# ./setup.py sdist
#
#
# Create binary distribution (non-univeral, python 3 only):
# ./setup.py bdist_wheel --python-tag=py3
#
# Register on PyPI:
# twine register dist/mypkg.whl
#
#
# Upload to PyPI:
# twine upload dist/*
#
# ----------------------------------------------------------------------------
# project version
version = '0.0.8'
# development status
# dev_status = '1 - Planning'
# dev_status = '2 - Pre-Alpha'
dev_status = '3 - Alpha'
# dev_status = '4 - Beta'
# dev_status = '5 - Production/Stable'
# dev_status = '6 - Mature'
# dev_status = '7 - Inactive'
# github repository url
repo = 'https://github.com/projectshift/shift-memory'
license_type = 'MIT License'
# monkey patch os for vagrant hardlinks
del os.link
# run setup
setup(**dict(
# author
author='Dmitry Belyakov',
author_email='dmitrybelyakov@gmail.com',
# project meta
name='shiftmemory',
version=version,
url=repo,
download_url=repo + '/archive/v' + version + '.tar.gz',
description='Python3 cache library',
keywords=[
'python3',
'cache',
'redis',
],
# classifiers
# see: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# maturity
'Development Status :: ' + dev_status,
# license
'License :: OSI Approved :: ' + license_type,
# audience
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
# pythons
'Programming Language :: Python :: 3',
# categories
'Environment :: Console',
'Environment :: Web Environment',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'
],
# project packages
packages=find_packages(exclude=['tests*']),
# include none-code data files from manifest.in (http://goo.gl/Uf0Yxc)
include_package_data=True,
# project dependencies
install_requires=[
'click>=7.0,<8.0',
'redis>=3.2.1,<4.0.0',
'hiredis>=1.0.0,<2.0.0',
'arrow>=0.13.1,<1.0.0'
],
# entry points
entry_points=dict(
console_scripts=[
'shiftmemory = shiftmemory.cli.console:cli'
]
),
# project license
license=license_type
))
|
mit
| -3,527,569,901,544,307,700
| 21.472727
| 78
| 0.565534
| false
| 3.662222
| false
| false
| false
|
agdsn/pycroft
|
tests/frontend/test_fields.py
|
1
|
1257
|
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import string
import pytest
from wtforms_widgets.fields.core import DateField
__author__ = 'shreyder'
def test_date_field_format_strings():
for directive, replacement in DateField.supported_directives.items():
assert DateField.convert_format_string("%" + directive) == replacement
assert DateField.convert_format_string("%%" + directive) == "%" + directive
for directive in DateField.unsupported_directives:
with pytest.raises(ValueError):
DateField.convert_format_string("%" + directive)
assert DateField.convert_format_string("%%" + directive) == "%" + directive
unknown_directives = set(string.ascii_letters).difference(
set(DateField.supported_directives.keys()),
set(DateField.unsupported_directives)
)
for directive in unknown_directives:
with pytest.raises(ValueError):
DateField.convert_format_string("%" + directive)
assert DateField.convert_format_string("%%") == "%"
assert DateField.convert_format_string("%%%%") == "%%"
|
apache-2.0
| 4,335,578,972,140,562,000
| 38.28125
| 83
| 0.692124
| false
| 4.218121
| false
| false
| false
|
testmana2/test
|
Helpviewer/VirusTotal/VirusTotalApi.py
|
1
|
15389
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the <a href="http://www.virustotal.com">VirusTotal</a>
API class.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import json
from PyQt5.QtCore import QObject, QUrl, QByteArray, pyqtSignal, qVersion
from PyQt5.QtNetwork import QNetworkRequest, QNetworkReply
from E5Gui import E5MessageBox
import Preferences
class VirusTotalAPI(QObject):
"""
Class implementing the <a href="http://www.virustotal.com">VirusTotal</a>
API.
@signal checkServiceKeyFinished(bool, str) emitted after the service key
check has been performed. It gives a flag indicating validity
(boolean) and an error message in case of a network error (string).
@signal submitUrlError(str) emitted with the error string, if the URL scan
submission returned an error.
@signal urlScanReport(str) emitted with the URL of the URL scan report page
@signal fileScanReport(str) emitted with the URL of the file scan report
page
"""
checkServiceKeyFinished = pyqtSignal(bool, str)
submitUrlError = pyqtSignal(str)
urlScanReport = pyqtSignal(str)
fileScanReport = pyqtSignal(str)
TestServiceKeyScanID = \
"4feed2c2e352f105f6188efd1d5a558f24aee6971bdf96d5fdb19c197d6d3fad"
ServiceResult_ItemQueued = -2
ServiceResult_ItemNotPresent = 0
ServiceResult_ItemPresent = 1
# HTTP Status Codes
ServiceCode_InvalidKey = 202
ServiceCode_RateLimitExceeded = 204
ServiceCode_InvalidPrivilege = 403
GetFileReportPattern = "{0}://www.virustotal.com/vtapi/v2/file/report"
ScanUrlPattern = "{0}://www.virustotal.com/vtapi/v2/url/scan"
GetUrlReportPattern = "{0}://www.virustotal.com/vtapi/v2/url/report"
GetIpAddressReportPattern = \
"{0}://www.virustotal.com/vtapi/v2/ip-address/report"
GetDomainReportPattern = "{0}://www.virustotal.com/vtapi/v2/domain/report"
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent object (QObject)
"""
super(VirusTotalAPI, self).__init__(parent)
self.__replies = []
self.__loadSettings()
self.__lastIP = ""
self.__lastDomain = ""
self.__ipReportDlg = None
self.__domainReportDlg = None
def __loadSettings(self):
"""
Private method to load the settings.
"""
if Preferences.getHelp("VirusTotalSecure"):
protocol = "https"
else:
protocol = "http"
self.GetFileReportUrl = self.GetFileReportPattern.format(protocol)
self.ScanUrlUrl = self.ScanUrlPattern.format(protocol)
self.GetUrlReportUrl = self.GetUrlReportPattern.format(protocol)
self.GetIpAddressReportUrl = self.GetIpAddressReportPattern.format(
protocol)
self.GetDomainReportUrl = self.GetDomainReportPattern.format(protocol)
self.errorMessages = {
204: self.tr("Request limit has been reached."),
0: self.tr("Requested item is not present."),
-2: self.tr("Requested item is still queued."),
}
def preferencesChanged(self):
"""
Public slot to handle a change of preferences.
"""
self.__loadSettings()
def checkServiceKeyValidity(self, key, protocol=""):
"""
Public method to check the validity of the given service key.
@param key service key (string)
@param protocol protocol used to access VirusTotal (string)
"""
if protocol == "":
urlStr = self.GetFileReportUrl
else:
urlStr = self.GetFileReportPattern.format(protocol)
request = QNetworkRequest(QUrl(urlStr))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
key, self.TestServiceKeyScanID).encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__checkServiceKeyValidityFinished)
self.__replies.append(reply)
def __checkServiceKeyValidityFinished(self):
"""
Private slot to determine the result of the service key validity check.
"""
res = False
msg = ""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
res = True
elif reply.error() == self.ServiceCode_InvalidKey:
res = False
else:
msg = reply.errorString()
self.__replies.remove(reply)
reply.deleteLater()
self.checkServiceKeyFinished.emit(res, msg)
def submitUrl(self, url):
"""
Public method to submit an URL to be scanned.
@param url url to be scanned (QUrl)
"""
request = QNetworkRequest(QUrl(self.ScanUrlUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&url=".format(
Preferences.getHelp("VirusTotalServiceKey")).encode("utf-8"))\
.append(QUrl.toPercentEncoding(url.toString()))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__submitUrlFinished)
self.__replies.append(reply)
def __submitUrlFinished(self):
"""
Private slot to determine the result of the URL scan submission.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == self.ServiceResult_ItemPresent:
self.urlScanReport.emit(result["permalink"])
self.__getUrlScanReportUrl(result["scan_id"])
else:
if result["response_code"] in self.errorMessages:
msg = self.errorMessages[result["response_code"]]
else:
msg = result["verbose_msg"]
self.submitUrlError.emit(msg)
elif reply.error() == self.ServiceCode_RateLimitExceeded:
self.submitUrlError.emit(
                self.errorMessages[self.ServiceCode_RateLimitExceeded])
else:
self.submitUrlError.emit(reply.errorString())
self.__replies.remove(reply)
reply.deleteLater()
def __getUrlScanReportUrl(self, scanId):
"""
Private method to get the report URL for a URL scan.
@param scanId ID of the scan to get the report URL for (string)
"""
request = QNetworkRequest(QUrl(self.GetUrlReportUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
Preferences.getHelp("VirusTotalServiceKey"), scanId)
.encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__getUrlScanReportUrlFinished)
self.__replies.append(reply)
def __getUrlScanReportUrlFinished(self):
"""
Private slot to determine the result of the URL scan report URL
request.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if "filescan_id" in result and result["filescan_id"] is not None:
self.__getFileScanReportUrl(result["filescan_id"])
self.__replies.remove(reply)
reply.deleteLater()
def __getFileScanReportUrl(self, scanId):
"""
Private method to get the report URL for a file scan.
@param scanId ID of the scan to get the report URL for (string)
"""
request = QNetworkRequest(QUrl(self.GetFileReportUrl))
request.setHeader(QNetworkRequest.ContentTypeHeader,
"application/x-www-form-urlencoded")
params = QByteArray("apikey={0}&resource={1}".format(
Preferences.getHelp("VirusTotalServiceKey"), scanId)
.encode("utf-8"))
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.post(request, params)
reply.finished.connect(self.__getFileScanReportUrlFinished)
self.__replies.append(reply)
def __getFileScanReportUrlFinished(self):
"""
Private slot to determine the result of the file scan report URL
request.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
self.fileScanReport.emit(result["permalink"])
self.__replies.remove(reply)
reply.deleteLater()
def getIpAddressReport(self, ipAddress):
"""
Public method to retrieve a report for an IP address.
@param ipAddress valid IPv4 address in dotted quad notation
@type str
"""
self.__lastIP = ipAddress
queryItems = [
("apikey", Preferences.getHelp("VirusTotalServiceKey")),
("ip", ipAddress),
]
url = QUrl(self.GetIpAddressReportUrl)
if qVersion() >= "5.0.0":
from PyQt5.QtCore import QUrlQuery
query = QUrlQuery()
query.setQueryItems(queryItems)
url.setQuery(query)
else:
url.setQueryItems(queryItems)
request = QNetworkRequest(url)
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.get(request)
reply.finished.connect(self.__getIpAddressReportFinished)
self.__replies.append(reply)
def __getIpAddressReportFinished(self):
"""
Private slot to process the IP address report data.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == 0:
E5MessageBox.information(
None,
self.tr("VirusTotal IP Address Report"),
self.tr("""VirusTotal does not have any information for"""
""" the given IP address."""))
elif result["response_code"] == -1:
E5MessageBox.information(
None,
self.tr("VirusTotal IP Address Report"),
self.tr("""The submitted IP address is invalid."""))
else:
owner = result["as_owner"]
resolutions = result["resolutions"]
try:
urls = result["detected_urls"]
except KeyError:
urls = []
from .VirusTotalIpReportDialog import VirusTotalIpReportDialog
self.__ipReportDlg = VirusTotalIpReportDialog(
self.__lastIP, owner, resolutions, urls)
self.__ipReportDlg.show()
self.__replies.remove(reply)
reply.deleteLater()
def getDomainReport(self, domain):
"""
Public method to retrieve a report for a domain.
@param domain domain name
@type str
"""
self.__lastDomain = domain
queryItems = [
("apikey", Preferences.getHelp("VirusTotalServiceKey")),
("domain", domain),
]
url = QUrl(self.GetDomainReportUrl)
if qVersion() >= "5.0.0":
from PyQt5.QtCore import QUrlQuery
query = QUrlQuery()
query.setQueryItems(queryItems)
url.setQuery(query)
else:
url.setQueryItems(queryItems)
request = QNetworkRequest(url)
import Helpviewer.HelpWindow
nam = Helpviewer.HelpWindow.HelpWindow.networkAccessManager()
reply = nam.get(request)
reply.finished.connect(self.__getDomainReportFinished)
self.__replies.append(reply)
def __getDomainReportFinished(self):
"""
        Private slot to process the domain report data.
"""
reply = self.sender()
if reply.error() == QNetworkReply.NoError:
result = json.loads(str(reply.readAll(), "utf-8"))
if result["response_code"] == 0:
E5MessageBox.information(
None,
self.tr("VirusTotal Domain Report"),
self.tr("""VirusTotal does not have any information for"""
""" the given domain."""))
elif result["response_code"] == -1:
E5MessageBox.information(
None,
self.tr("VirusTotal Domain Report"),
self.tr("""The submitted domain address is invalid."""))
else:
resolutions = result["resolutions"]
try:
urls = result["detected_urls"]
except KeyError:
urls = []
try:
subdomains = result["subdomains"]
except KeyError:
subdomains = []
try:
bdCategory = result["BitDefender category"]
except KeyError:
bdCategory = self.tr("not available")
try:
tmCategory = result["TrendMicro category"]
except KeyError:
tmCategory = self.tr("not available")
try:
wtsCategory = result["Websense ThreatSeeker category"]
except KeyError:
wtsCategory = self.tr("not available")
try:
whois = result["whois"]
except KeyError:
whois = ""
from .VirusTotalDomainReportDialog import \
VirusTotalDomainReportDialog
self.__domainReportDlg = VirusTotalDomainReportDialog(
self.__lastDomain, resolutions, urls, subdomains,
bdCategory, tmCategory, wtsCategory, whois)
self.__domainReportDlg.show()
self.__replies.remove(reply)
reply.deleteLater()
def close(self):
"""
Public slot to close the API.
"""
for reply in self.__replies:
reply.abort()
self.__ipReportDlg and self.__ipReportDlg.close()
self.__domainReportDlg and self.__domainReportDlg.close()
|
gpl-3.0
| -7,896,726,135,286,818,000
| 36.534146
| 79
| 0.576841
| false
| 4.390585
| false
| false
| false
|
chenzilin/git-repo
|
subcmds/help.py
|
1
|
4832
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from formatter import AbstractFormatter, DumbWriter
from color import Coloring
from command import PagedCommand, MirrorSafeCommand, GitcAvailableCommand, GitcClientCommand
import gitc_utils
class Help(PagedCommand, MirrorSafeCommand):
common = False
helpSummary = "Display detailed help on a command"
helpUsage = """
%prog [--all|command]
"""
helpDescription = """
Displays detailed usage information about a command.
"""
def _PrintAllCommands(self):
print('usage: repo COMMAND [ARGS]')
print('The complete list of recognized repo commands are:')
commandNames = list(sorted(self.commands))
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
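    # e.g. with maxlen == 8 this builds the format string ' %-8s %s'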
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print(fmt % (name, summary))
print("See 'repo help <command>' for more information on a "
'specific command.')
def _PrintCommonCommands(self):
print('usage: repo COMMAND [ARGS]')
print('The most commonly used repo commands are:')
def gitc_supported(cmd):
if not isinstance(cmd, GitcAvailableCommand) and not isinstance(cmd, GitcClientCommand):
return True
if self.manifest.isGitcClient:
return True
if isinstance(cmd, GitcClientCommand):
return False
if gitc_utils.get_gitc_manifest_dir():
return True
return False
commandNames = list(sorted([name
for name, command in self.commands.items()
if command.common and gitc_supported(command)]))
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = self.commands[name]
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print(fmt % (name, summary))
print(
"See 'repo help <command>' for more information on a specific command.\n"
"See 'repo help --all' for a complete list of recognized commands.")
def _PrintCommandHelp(self, cmd):
class _Out(Coloring):
def __init__(self, gc):
Coloring.__init__(self, gc, 'help')
self.heading = self.printer('heading', attr='bold')
self.wrap = AbstractFormatter(DumbWriter())
def _PrintSection(self, heading, bodyAttr):
try:
body = getattr(cmd, bodyAttr)
except AttributeError:
return
if body == '' or body is None:
return
self.nl()
self.heading('%s', heading)
self.nl()
self.nl()
me = 'repo %s' % cmd.NAME
body = body.strip()
body = body.replace('%prog', me)
asciidoc_hdr = re.compile(r'^\n?#+ (.+)$')
for para in body.split("\n\n"):
if para.startswith(' '):
self.write('%s', para)
self.nl()
self.nl()
continue
m = asciidoc_hdr.match(para)
if m:
self.heading(m.group(1))
self.nl()
self.nl()
continue
self.wrap.add_flowing_data(para)
self.wrap.end_paragraph(1)
self.wrap.end_paragraph(0)
out = _Out(self.manifest.globalConfig)
out._PrintSection('Summary', 'helpSummary')
cmd.OptionParser.print_help()
out._PrintSection('Description', 'helpDescription')
def _Options(self, p):
p.add_option('-a', '--all',
dest='show_all', action='store_true',
help='show the complete list of commands')
def Execute(self, opt, args):
if len(args) == 0:
if opt.show_all:
self._PrintAllCommands()
else:
self._PrintCommonCommands()
elif len(args) == 1:
name = args[0]
try:
cmd = self.commands[name]
except KeyError:
print("repo: '%s' is not a repo command." % name, file=sys.stderr)
sys.exit(1)
cmd.manifest = self.manifest
self._PrintCommandHelp(cmd)
else:
self._PrintCommandHelp(self)
|
apache-2.0
| -7,711,002,196,199,770,000
| 28.284848
| 94
| 0.617343
| false
| 3.976955
| false
| false
| false
|
fga-gpp-mds/2017.2-Receituario-Medico
|
medical_prescription/user/test/test_model_send_invitation.py
|
1
|
1112
|
# Standard library
import hashlib
import random
import datetime
# Django imports
from django.test import TestCase
# Local django imports
from user.models import SendInvitationProfile, Patient
class TestSendInvitationProfile(TestCase):
def setUp(self):
self.send_invitation_profile = SendInvitationProfile()
self.patient = Patient.objects.create_user(email='patient@patient.com')
self.salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
self.activation_key = hashlib.sha1(str(self.salt+self.patient.email).encode('utf-8')).hexdigest()
self.key_expires = datetime.datetime.today() + datetime.timedelta(2)
self.send_invitation_profile = SendInvitationProfile.objects.create(activation_key=self.activation_key,
patient=self.patient,
key_expires=self.key_expires)
def test_user_str(self):
self.assertEquals(str(self.send_invitation_profile), 'patient@patient.com',)
|
mit
| 954,918,931,982,965,900
| 40.037037
| 111
| 0.636282
| false
| 4.229008
| false
| false
| false
|
hzlf/openbroadcast
|
website/apps/abcast/views/schedulerviews.py
|
1
|
11454
|
from django.views.generic import DetailView, ListView, FormView, UpdateView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.shortcuts import get_object_or_404, render_to_response
from django.db.models import Avg
from django import http
from django.http import HttpResponse, HttpResponseForbidden, Http404, HttpResponseRedirect
from django.utils import simplejson as json
from django.conf import settings
from django.shortcuts import redirect
from django.core import serializers
from django.utils.translation import ugettext as _
import json
from django.template import RequestContext
from abcast.models import Emission, Channel
from alibrary.models import Playlist
#from abcast.filters import EmissionFilter
from tagging.models import Tag, TaggedItem
from tagging.utils import calculate_cloud
import datetime
from jsonview.decorators import json_view
import jsonview
from easy_thumbnails.files import get_thumbnailer
from django.db.models import Q
from lib.util import tagging_extra
# logging
import logging
logger = logging.getLogger(__name__)
SCHEDULER_GRID_WIDTH = getattr(settings, 'SCHEDULER_GRID_WIDTH', 830)
SCHEDULER_GRID_OFFSET = getattr(settings, 'SCHEDULER_GRID_OFFSET', 60)
SCHEDULER_PPH = getattr(settings, 'SCHEDULER_PPH', 42)
SCHEDULER_PPD = getattr(settings, 'SCHEDULER_PPD', 110) # actually should be calculated
# how long ahead should the schedule be locked
SCHEDULER_LOCK_AHEAD = getattr(settings, 'SCHEDULER_LOCK_AHEAD', 60) # 1 minute, to allow caching of files
SCHEDULER_NUM_DAYS = 7
# hours to offset the schedule
# 6: day starts at 6:00 and goes until 6:00
SCHEDULER_OFFSET = getattr(settings, 'SCHEDULER_OFFSET', 6)
SCHEDULER_DEFAULT_CHANNEL_ID = getattr(settings, 'SCHEDULER_DEFAULT_CHANNEL_ID', 1)
def schedule(request):
log = logging.getLogger('abcast.schedulerviews.schedule')
data = {}
    # get all available channels
data['channels'] = Channel.objects.filter(has_scheduler=True)
data['list_style'] = request.GET.get('list_style', 's')
data['days_offset'] = request.GET.get('days_offset', 0)
data['get'] = request.GET
num_days = request.GET.get('num_days', SCHEDULER_NUM_DAYS)
data['num_days'] = int(num_days)
days = []
today = datetime.datetime.now()
today = datetime.datetime(today.year, today.month, today.day)
offset = datetime.timedelta(days=-today.weekday() + int(data['days_offset']))
for day in range(int(num_days)):
date = today + offset
#date = date.strftime("%a, %d %b %Y %H:%M:%S +0000")
days.append( date )
offset += datetime.timedelta(days=1)
data['today'] = today
data['days'] = days
data['pph'] = SCHEDULER_PPH
data['ppd'] = (SCHEDULER_GRID_WIDTH - SCHEDULER_GRID_OFFSET) / int(num_days)
data['offset'] = SCHEDULER_OFFSET
# build a range-filter string for the API
range_start = days[0] + datetime.timedelta(hours=SCHEDULER_OFFSET)
range_end = days[-1] + datetime.timedelta(hours=SCHEDULER_OFFSET + 24)
range_start = range_start.strftime("%Y-%m-%dT%H:%M:%S")
range_end = range_end.strftime("%Y-%m-%dT%H:%M:%S")
data['range_filter'] = '&time_start__gte=%s&time_end__lte=%s&' % (range_start, range_end)
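    # e.g. with the default 6-hour offset and a 7-day grid starting Monday
    # 2013-07-01 (dates illustrative), this yields
    # '&time_start__gte=2013-07-01T06:00:00&time_end__lte=2013-07-08T06:00:00&'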
channel_id = request.GET.get('channel_id', SCHEDULER_DEFAULT_CHANNEL_ID)
channel_id = int(channel_id)
channel = Channel.objects.get(pk=channel_id)
dayparts = channel.get_dayparts(days[0])
data['dayparts'] = dayparts
data['channel'] = channel
print dayparts
for dp in dayparts:
print dp.duration
log.debug('grid pph: %s' % data['pph'])
log.debug('grid ppd: %s' % data['ppd'])
data['station_time'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
# look for a selected playlist in session
playlist_id = request.session.get('scheduler_selected_playlist_id', None)
if playlist_id:
data['selected_playlist'] = Playlist.objects.get(pk=playlist_id)
log.debug('schedule offset: %s' % offset)
log.debug('schedule today: %s' % today)
log.debug('schedule playlist_id: %s' % playlist_id)
return render_to_response('abcast/schedule.html', data, context_instance=RequestContext(request))
class EmissionListView(ListView):
model = Emission
extra_context = {}
def get_context_data(self, **kwargs):
context = super(EmissionListView, self).get_context_data(**kwargs)
self.extra_context['list_style'] = self.request.GET.get('list_style', 's')
self.extra_context['get'] = self.request.GET
days = []
today = datetime.datetime.now()
offset = datetime.timedelta(days=-today.weekday())
for day in range(7):
date = today + offset
#date = date.strftime("%a, %d %b %Y %H:%M:%S +0000")
days.append( date )
offset += datetime.timedelta(days=1)
self.extra_context['today'] = today
self.extra_context['days'] = days
context.update(self.extra_context)
return context
def get_queryset(self, **kwargs):
# return render_to_response('my_app/template.html', {'filter': f})
kwargs = {}
self.tagcloud = None
q = self.request.GET.get('q', None)
if q:
qs = Emission.objects.filter(Q(name__istartswith=q))\
.distinct()
else:
qs = Emission.objects.all()
return qs
class EmissionDetailView(DetailView):
# context_object_name = "emission"
model = Emission
extra_context = {}
def render_to_response(self, context):
return super(EmissionDetailView, self).render_to_response(context, mimetype="text/html")
def get_context_data(self, **kwargs):
obj = kwargs.get('object', None)
context = super(EmissionDetailView, self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
"""
views for playlist / emission selection
"""
#@json_view
def select_playlist(request):
log = logging.getLogger('abcast.schedulerviews.select_playlist')
playlist_id = request.GET.get('playlist_id', None)
next = request.GET.get('next', None)
if not playlist_id:
request.session['scheduler_selected_playlist_id'] = None
try:
playlist = Playlist.objects.get(pk=playlist_id)
except Playlist.DoesNotExist:
log.warning('playlist does not exists. (id: %s)' % playlist_id)
raise Http404
request.session['scheduler_selected_playlist_id'] = playlist.pk
log.debug('nex: %s' % next)
log.debug('playlist_id: %s' % playlist_id)
if next:
return redirect(next)
data = {
'status': True,
'playlist_id': playlist.id
}
#return data
data = json.dumps(data)
return HttpResponse(data, mimetype='application/json')
"""
put object to schedule
"""
@json_view
def schedule_object(request):
log = logging.getLogger('abcast.schedulerviews.schedule_object')
ct = request.POST.get('ct', None)
obj_id = request.POST.get('obj_id', None)
top = request.POST.get('top', None)
left = request.POST.get('left', None)
range_start = request.POST.get('range_start', None)
range_end = request.POST.get('range_end', None)
num_days = request.POST.get('num_days', SCHEDULER_NUM_DAYS)
log.debug('content type: %s' % ct)
if ct == 'playlist':
obj = Playlist.objects.get(pk=int(obj_id))
log.debug('object to schedule: %s' % obj.name)
pph = SCHEDULER_PPH
# ppd = SCHEDULER_PPD
ppd = (SCHEDULER_GRID_WIDTH - SCHEDULER_GRID_OFFSET) / int(num_days)
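    # e.g. with the default pph of 42, a drop at top=84px maps to
    # 84 / 42 * 60 = 120 minutes, which is then rounded below to the nearest
    # 15 minutes (numbers illustrative)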
top = float(top) / pph * 60
offset_min = int(15 * round(float(top)/15))
left = float(left) / ppd
offset_d = int(round(float(left)))
log.debug('minutes (offset): %s' % offset_min)
log.debug('days (offset): %s' % offset_d)
# calculate actual date/time for position
schedule_start = datetime.datetime.strptime('%s 00:00' % range_start, '%Y-%m-%d %H:%M')
# add offsets
time_start = schedule_start + datetime.timedelta(minutes=offset_min)
time_start = time_start + datetime.timedelta(days=offset_d)
time_start = time_start + datetime.timedelta(hours=SCHEDULER_OFFSET)
# time_end = time_start + datetime.timedelta(milliseconds=obj.get_duration())
# for duration calculation we use the 'target duration' (to avoid blocked slots)
time_end = time_start + datetime.timedelta(seconds=(obj.target_duration))
log.debug('time_start: %s' % time_start)
log.debug('time_end: %s' % time_end)
# check if in past
now = datetime.datetime.now()
lock_end = now + datetime.timedelta(seconds=SCHEDULER_LOCK_AHEAD)
if lock_end > time_start:
return { 'message': _('You cannot schedule things in the past!') }
# check if slot is free
# hm just allow some seconds of tolerance (in case of mini-overlaps)
es = Emission.objects.filter(time_end__gt=time_start + datetime.timedelta(seconds=2), time_start__lt=time_end)
if es.count() > 0:
for em in es:
print 'Blocking emission: %s' % em.id
print em.time_start
print em.time_end
return { 'message': _('Sorry, but the desired time does not seem to be available.') }
# if no errors so far -> create emission and attach object
e = Emission(content_object=obj, time_start=time_start, user=request.user)
e.save()
data = {
'status': True,
'obj_id': obj_id
}
return data
#data = json.dumps(data)
#return HttpResponse(data, mimetype='application/json')
"""
copy a day to another
"""
@json_view
def copy_paste_day(request):
log = logging.getLogger('abcast.schedulerviews.copy_day')
source = request.POST.get('source', None)
target = request.POST.get('target', None)
channel_id = request.POST.get('channel_id', SCHEDULER_DEFAULT_CHANNEL_ID)
channel = Channel.objects.get(pk=channel_id)
log.debug('copy from: %s to %s' % (source, target))
if source and target:
source = datetime.datetime.strptime(source, '%Y-%m-%d')
target = datetime.datetime.strptime(target, '%Y-%m-%d')
offset = (target - source)
source_start = source + datetime.timedelta(hours=SCHEDULER_OFFSET)
source_end = source_start + datetime.timedelta(hours=24)
log.debug('source: %s to %s' % (source_start, source_end))
log.debug('offset: %s' % (offset))
# get emissions
es = Emission.objects.filter(time_start__gte=source_start, time_end__lte=source_end)
for e in es:
print e
e.pk = None
e.uuid = None
e.locked = False
e.time_start = e.time_start + offset
e.save()
#ne = Emission()
now = datetime.datetime.now()
data = {
'status': True,
}
return data
|
gpl-3.0
| 4,984,770,444,386,933,000
| 26.868613
| 114
| 0.620918
| false
| 3.628128
| false
| false
| false
|
jonhadfield/acli
|
lib/acli/output/vpc.py
|
1
|
4347
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output import (output_ascii_table, output_ascii_table_list, dash_if_none)
from colorclass import Color, Windows
Windows.enable(auto_colors=True, reset_atexit=True)
def get_tag(name=None, tags=None):
if tags:
for tag in tags:
if tag.get('Key') == name:
return tag.get('Value')
def output_vpc_list(vpcs=None):
"""
@type vpcs: dict
"""
td = list()
table_header = [Color('{autoblue}vpc id{/autoblue}'), Color('{autoblue}name{/autoblue}'),
Color('{autoblue}CIDR block{/autoblue}'), Color('{autoblue}tenancy{/autoblue}'),
Color('{autoblue}state{/autoblue}'), Color('{autoblue}DHCP options{/autoblue}'),
Color('{autoblue}default vpc{/autoblue}')]
for vpc in vpcs.get('Vpcs'):
vpcid = vpc.get('VpcId')
cidr_block = vpc.get('CidrBlock')
tenancy = vpc.get('InstanceTenancy')
state = vpc.get('State')
dhcpoptions = vpc.get('DhcpOptionsId')
default = str(vpc.get('IsDefault'))
td.append([vpcid,
dash_if_none(get_tag(name='Name', tags=vpc.get('Tags'))),
dash_if_none(cidr_block),
dash_if_none(tenancy),
dash_if_none(state),
dash_if_none(dhcpoptions),
default])
output_ascii_table_list(table_title=Color('{autowhite}VPCs{/autowhite}'),
table_data=td,
table_header=table_header,
inner_heading_row_border=True)
exit(0)
def output_vpc_info(vpc=None, subnets=None):
"""
@type vpc: ec2.Vpc
@type subnets: dict
"""
if vpc:
td = list()
td.append([Color('{autoblue}vpc id{/autoblue}'), vpc.get('VpcId')])
td.append([Color('{autoblue}CIDR block{/autoblue}'), vpc.get('CidrBlock')])
td.append([Color('{autoblue}default{/autoblue}'), str(vpc.get('IsDefault'))])
td.append([Color('{autoblue}tenancy{/autoblue}'), vpc.get('InstanceTenancy')])
td.append([Color('{autoblue}state{/autoblue}'), dash_if_none(vpc.get('State'))])
td.append([Color('{autoblue}tags{/autoblue}'), " "])
if vpc.get('Tags'):
for vpc_tag in vpc.get('Tags'):
td.append([Color('{autoblue}' + "{0}".format(vpc_tag.get('Key'))+'{/autoblue}'),
" {0}".format(vpc_tag.get('Value'))])
if subnets:
td.append(["{0}".format('-' * 30), "{0}".format('-' * 30)])
td.append([Color('{autowhite}SUBNETS{/autowhite}'), " "])
for subnet in subnets.get('Subnets'):
td.append(["{0}".format('-' * 30),
"{0}".format('-' * 30)])
td.append([Color('{autoblue}subnet id{/autoblue}'),
subnet.get('SubnetId')])
td.append([Color('{autoblue}az{/autoblue}'),
subnet.get('AvailabilityZone')])
td.append([Color('{autoblue}state{/autoblue}'),
subnet.get('State')])
td.append([Color('{autoblue}available IPs{/autoblue}'),
str(subnet.get('AvailableIpAddressCount'))])
td.append([Color('{autoblue}CIDR block{/autoblue}'),
subnet.get('CidrBlock')])
td.append([Color('{autoblue}default for az{/autoblue}'),
str(subnet.get('DefaultForAz'))])
td.append([Color('{autoblue}map public ip on launch{/autoblue}'),
str(subnet.get('MapPublicIpOnLaunch'))])
if subnet.get('Tags'):
td.append([Color('{autoblue}tags{/autoblue}'), "-"])
for tag in subnet.get('Tags'):
tag_key, tag_value = dash_if_none(tag.get('Key')), dash_if_none(tag.get('Value'))
td.append([Color('{autoblue}'+" {}".format(tag_key)+'{/autoblue}'), "{}".format(tag_value)])
output_ascii_table(table_title=Color('{autowhite}vpc info{/autowhite}'),
table_data=td)
else:
exit('VPC does not exist.')
exit(0)
|
mit
| 8,650,758,224,400,534,000
| 46.769231
| 116
| 0.517138
| false
| 3.668354
| false
| false
| false
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0059_sample_dataset_file_path.py
|
1
|
1937
|
"""
Migration script to modify the 'file_path' field type in 'sample_dataset' table
to 'TEXT' so that it can support large file paths exceeding 255 characters
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
from galaxy.util.json import loads, dumps
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
SampleDataset_table = Table( "sample_dataset", metadata, autoload=True )
except NoSuchTableError, e:
SampleDataset_table = None
log.debug( "Failed loading table 'sample_dataset'" )
if SampleDataset_table is not None:
cmd = "SELECT id, file_path FROM sample_dataset"
result = migrate_engine.execute( cmd )
filepath_dict = {}
for r in result:
id = int(r[0])
filepath_dict[id] = r[1]
# remove the 'file_path' column
try:
SampleDataset_table.c.file_path.drop()
except Exception, e:
log.debug( "Deleting column 'file_path' from the 'sample_dataset' table failed: %s" % ( str( e ) ) )
# create the column again
try:
col = Column( "file_path", TEXT )
col.create( SampleDataset_table )
assert col is SampleDataset_table.c.file_path
except Exception, e:
log.debug( "Creating column 'file_path' in the 'sample_dataset' table failed: %s" % ( str( e ) ) )
for id, file_path in filepath_dict.items():
cmd = "update sample_dataset set file_path='%s' where id=%i" % (file_path, id)
migrate_engine.execute( cmd )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
pass
|
gpl-3.0
| 8,046,237,435,248,791,000
| 31.830508
| 112
| 0.636551
| false
| 3.858566
| false
| false
| false
|
vanadium23/catalog-project
|
catalog/app/models.py
|
1
|
2366
|
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from app.database import Base
class User(Base):
"""This is an ORM model for logging users"""
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50), nullable=False)
email = Column(String(100), nullable=False, unique=True)
picture = Column(String(250))
class Category(Base):
"""This is an ORM model for our categories"""
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False, unique=True)
description = Column(String(250), nullable=True)
items = relationship("Item",
backref="Category",
cascade="all, delete-orphan")
author_id = Column(Integer, ForeignKey('users.id'))
author = relationship('User')
def __init__(self, name, description, author_id):
self.name = name
self.description = description
self.author_id = author_id
@property
def to_json(self):
category = {"name": self.name,
"description": self.description,
"id": self.id
}
category['items'] = [i.to_json for i in self.items]
return category
class Item(Base):
"""This is an ORM model for our items"""
__tablename__ = 'items'
id = Column(Integer, primary_key=True)
name = Column(String(80), nullable=False, unique=True)
description = Column(String(250), nullable=True)
image_name = Column(String(250),
nullable=False,
default='no-image-large.png')
category_id = Column(Integer, ForeignKey('categories.id'))
category = relationship('Category')
author_id = Column(Integer, ForeignKey('users.id'))
author = relationship('User')
def __init__(self, name, description, category_id, author_id):
self.name = name
self.description = description
self.category_id = category_id
self.author_id = author_id
@property
def to_json(self):
return {"id": self.id,
"name": self.name,
"description": self.description,
"image_name": self.image_name,
"category_id": self.category_id
}
|
mit
| -6,516,774,220,803,841,000
| 31.861111
| 66
| 0.592139
| false
| 4.195035
| false
| false
| false
|
py-in-the-sky/challenges
|
mine_allocations.py
|
1
|
8134
|
"""
Greedy Algorithm
https://community.topcoder.com/stat?c=problem_statement&pm=1957&rd=4650
Discussion: https://www.topcoder.com/community/data-science/data-science-tutorials/greedy-is-good/
"[T]he [expected] profit of allocating an extra worker to a mine is always higher or equal
with the [expected] profit of allocating the next extra worker to that mine." That is,
for each mine, the profit from allocating an additional miner is a non-increasing
sequence; the profit you'll get from adding this miner is greater than or
equal to the profit from allocating the next.
Because of this structure, we can devise a greedy algorithm that finds the
globally maximum profit.
"""
from collections import deque
def P_at_least_n(n, probabilities):
return sum(probabilities[n:])
def get_marginal_profits(mine):
probabilities = map(lambda f: f / 100, map(float, mine.split(', ')))
mine_len = len(probabilities)
    marginal_probabilities = [P_at_least_n(n, probabilities) for n in xrange(mine_len)]
    mp = marginal_probabilities + [0]
p = probabilities
marginal_profits = (mp[i+1] * 60 + p[i] * (50 - 10*(i-1)) + (1 - mp[i]) * -20 for i in xrange(mine_len))
marginal_profits = deque(marginal_profits)
marginal_profits.popleft() # remove p_0, which is always 1.0 and not needed for allocation decisions
return marginal_profits
def get_allocation(mines, miners):
marginal_profits = map(get_marginal_profits, mines)
allocation = [0] * len(mines)
for _ in xrange(miners):
available_mines = (i for i,_ in enumerate(marginal_profits) if allocation[i] < 6)
i = max(available_mines, key=lambda i: marginal_profits[i][0])
mine = marginal_profits[i]
mine.popleft() # remove marginal profit from used allocation
allocation[i] += 1
return allocation
def tests():
miners = 4
mines = [
"000, 030, 030, 040, 000, 000, 000",
"020, 020, 020, 010, 010, 010, 010"
]
assert get_allocation(mines, miners) == [2, 2]
print 'one'
miners = 8
mines = [
"100, 000, 000, 000, 000, 000, 000",
"100, 000, 000, 000, 000, 000, 000",
"100, 000, 000, 000, 000, 000, 000",
"100, 000, 000, 000, 000, 000, 000",
"100, 000, 000, 000, 000, 000, 000"
]
assert get_allocation(mines, miners) == [6, 2, 0, 0, 0]
print 'two'
miners = 30
mines = [
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000",
"050, 000, 000, 000, 000, 050, 000"
]
assert get_allocation(mines, miners) == [4, 4, 4, 4, 4, 4, 4, 2, 0, 0]
print 'three'
miners = 56
mines = [
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004",
"026, 012, 005, 013, 038, 002, 004"
]
assert get_allocation(mines, miners) == [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
print 'four'
miners = 150
mines = [
"100, 000, 000, 000, 000, 000, 000",
"090, 010, 000, 000, 000, 000, 000",
"080, 020, 000, 000, 000, 000, 000",
"075, 025, 000, 000, 000, 000, 000",
"050, 050, 000, 000, 000, 000, 000",
"025, 075, 000, 000, 000, 000, 000",
"020, 080, 000, 000, 000, 000, 000",
"010, 090, 000, 000, 000, 000, 000",
"000, 100, 000, 000, 000, 000, 000",
"000, 090, 010, 000, 000, 000, 000",
"000, 080, 020, 000, 000, 000, 000",
"000, 075, 025, 000, 000, 000, 000",
"000, 050, 050, 000, 000, 000, 000",
"000, 025, 075, 000, 000, 000, 000",
"000, 020, 080, 000, 000, 000, 000",
"000, 010, 090, 000, 000, 000, 000",
"000, 000, 100, 000, 000, 000, 000",
"000, 000, 090, 010, 000, 000, 000",
"000, 000, 080, 020, 000, 000, 000",
"000, 000, 075, 025, 000, 000, 000",
"000, 000, 050, 050, 000, 000, 000",
"000, 000, 025, 075, 000, 000, 000",
"000, 000, 020, 080, 000, 000, 000",
"000, 000, 010, 090, 000, 000, 000",
"000, 000, 000, 100, 000, 000, 000",
"000, 000, 000, 100, 000, 000, 000",
"000, 000, 000, 090, 010, 000, 000",
"000, 000, 000, 080, 020, 000, 000",
"000, 000, 000, 075, 025, 000, 000",
"000, 000, 000, 050, 050, 000, 000",
"000, 000, 000, 025, 075, 000, 000",
"000, 000, 000, 020, 080, 000, 000",
"000, 000, 000, 010, 090, 000, 000",
"000, 000, 000, 000, 100, 000, 000",
"000, 000, 000, 000, 090, 010, 000",
"000, 000, 000, 000, 080, 020, 000",
"000, 000, 000, 000, 075, 025, 000",
"000, 000, 000, 000, 050, 050, 000",
"000, 000, 000, 000, 025, 075, 000",
"000, 000, 000, 000, 020, 080, 000",
"000, 000, 000, 000, 010, 090, 000",
"000, 000, 000, 000, 000, 100, 000",
"000, 000, 000, 000, 000, 090, 010",
"000, 000, 000, 000, 000, 080, 020",
"000, 000, 000, 000, 000, 075, 025",
"000, 000, 000, 000, 000, 050, 050",
"000, 000, 000, 000, 000, 025, 075",
"000, 000, 000, 000, 000, 020, 080",
"000, 000, 000, 000, 000, 010, 090",
"000, 000, 000, 000, 000, 000, 100"
]
assert get_allocation(mines, miners) == [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6]
print 'five'
print 'tests pass!'
if __name__ == '__main__':
tests()
|
mit
| -5,176,688,691,923,415,000
| 38.678049
| 243
| 0.512294
| false
| 2.432416
| false
| false
| false
|
Nic30/hwtHls
|
hwtHls/examples/pid.py
|
1
|
1640
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import Add
from hwt.synthesizer.param import Param
from hwtHls.platform.virtual import VirtualHlsPlatform
from hwtHls.hls import Hls
from hwtLib.logic.pid import PidController
class PidControllerHls(PidController):
def _config(self):
super(PidControllerHls, self)._config()
self.CLK_FREQ = Param(int(100e6))
def _impl(self):
# register of current output value
u = self._reg("u", dtype=self.output._dtype, def_val=0)
# create y-pipeline registers (y -> y_reg[0]-> y_reg[1])
y = [self.input, ]
for i in range(2):
_y = self._reg("y_reg%d" % i, dtype=self.input._dtype, def_val=0)
# feed data from last register
_y(y[-1])
y.append(_y)
# trim signal to width of output
def trim(signal):
return signal._reinterpret_cast(self.output._dtype)
# create arith. expressions between inputs and regs
with Hls(self, freq=self.CLK_FREQ) as hls:
io = hls.io
err = io(self.input) - io(self.target)
a = [io(c) for c in self.coefs]
y = [io(_y) for _y in y]
_u = Add(io(u), a[0] * err, a[1] * y[0],
a[2] * y[1], a[3] * y[2], key=trim)
hls.io(u)(_u)
# propagate output value register to output
self.output(u)
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = PidController()
print(to_rtl_str(u))
u = PidControllerHls()
print(to_rtl_str(u, target_platform=VirtualHlsPlatform()))
|
mit
| -3,685,152,865,492,009,500
| 29.943396
| 77
| 0.570732
| false
| 3.166023
| false
| false
| false
|
TinkerMill/mms-server
|
mmsServer/__init__.py
|
1
|
5213
|
#!/usr/bin/env python
# __init__.py
### IMPORTS ###
import os
import sys
import os.path
import time
import json
from datetime import datetime
from flask import Flask, g, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
### GLOBALS ###
# Have to setup the template directory when this is a package
# http://stackoverflow.com/questions/8478404/flask-doesnt-locate-template-directory-when-running-with-twisted
templateDirectory = os.path.join( os.path.dirname( os.path.abspath(__file__)), 'templates')
app = Flask( 'mmsServer', template_folder = templateDirectory)
#app.config.update(dict(DATABASE="tinkermill.db"))
#app.config.from_envvar('FLASKR_SETTINGS', silent=True)
#SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join( os.path.dirname( __file__), 'mms.db')
SQLALCHEMY_DATABASE_URI = 'mysql://root:strangehat@localhost/mms_server'
app.config[ 'SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy( app)
### VIEWS ###
# This should be used in the modules to import the models for use
from mmsServer.models import Member
from mmsServer.models import Logaccessdevice
from mmsServer.models import Accessdevice
# create everything
db.create_all()
db.session.commit()
### FUNCTIONS ###
### ROUTING ###
def log(deviceId, memberId, logMessage):
"""log access to the API. will add a timestamp to the logs
Args:
deviceId (int): The ID of the device
memberId (int): The ID of the member
message (string): message describing what happened
Returns:
nothing
"""
l = Logaccessdevice(device_id = deviceId, member_id = memberId, message=logMessage, timestamp=datetime.now() )
db.session.add(l)
db.session.commit()
@app.route("/checkAccess/<deviceId>/<serialNumber>")
def checkAccess(deviceId=None, serialNumber=None):
"""Return if serialNumber has access to current device
Given a number off the RFID badge, lookup the user that is associated
with that number, and then check if that user has access to that deviceid
# test with :
# http://localhost:5000/checkAccess/0/a2f49dk3 <- YAY
# http://localhost:5000/checkAccess/0/a2f49dk33 <- FAIL
Args:
deviceId (int): The ID of the device
serialNumber (string) : the Serial number on the badge
Returns:
JSON The return code::
{status: true, message: "Success" } -- Success!
{status: false, message: "fail reason" } -- No good.
"""
    # NOTE: look the member up first so memberId is defined for logging;
    # this assumes the Member model stores the badge serial as 'serial'.
    m = Member.query.filter(Member.serial == serialNumber).first()
    memberId = m.id if m is not None else None
    log(deviceId, memberId, "Requesting Access for serial:" + serialNumber)
    if m and m.account_disabled == False:
        log(deviceId, memberId, "Granted Access")
        return json.dumps({'status': True, 'message': "Success"})
    else:
        log(deviceId, memberId, "Denied Access : Access has been revoked")
        return json.dumps({'status': False, 'message': "Access has been revoked"})
@app.route("/log/usageLog")
def showLogusageLog():
"""
Show the usage log, which shows what memberId's are trying
to accomplish.
http://localhost:5000/log/usageLog
"""
logData = ""
return render_template('usageLog.html', logData=logData)
@app.route("/list/members")
def listMembers():
mlist=""
return render_template('memberList.html', mlist=mlist)
@app.route("/processData" , methods=['POST'])
def processData():
"""take a cmd structure to update a table. see the testData.html
in the static folder to see how to send valid data to this endpoint.
This will be used to create new records and update existing records
base on the cmd (update/new)
Args:
cmd : is this new or an update
table: the table to modify
Returns:
JSON The return code::
{status: true, message: "Success" } -- Success!
{status: false, message: "fail reason" } -- No good.
"""
dataStruct = request.form
cmd = dataStruct['cmd']
table = dataStruct['table']
response = '{status: false, message: "no valid path" }'
# if acl check does not return true then fail to make the update
if not checkAcl(dataStruct['username'], dataStruct['passwordHash'] , cmd, table, get_db()):
print cmd
return '{status: false, message: "ACL Fail Check" }'
# when creating functions to handle data, pass in the dataStruct, and get_db()
# which will give it access to the database
# see the response format above to see what kind of string/JSON to return to the client
# so it knows what happened.
if cmd == "new" and table == "member":
response = newMember(dataStruct, get_db() )
if cmd == "update" and table == "member":
response = updateMember(dataStruct, get_db() )
return response
@app.route("/")
def index():
"""main landing page
"""
return render_template('index.html')
### MAIN ###
def main():
app.run()
if __name__ == '__main__':
main()
|
apache-2.0
| -2,763,381,210,062,170,000
| 28.48538
| 114
| 0.647995
| false
| 3.705046
| false
| false
| false
|
ColumbiaCMB/kid_readout
|
apps/data_taking_scripts/old_scripts/highq_power_sweep_140423_0813f4.py
|
1
|
5959
|
import matplotlib
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator import fit_best_resonator
ri = baseband.RoachBasebandWide()
ri.initialize()
#ri.set_fft_gain(6)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-02-27.npy')
f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_140423_0813f4.npy')
f0s.sort()
#f0s = f0s*(0.9995)
suffix = "led"
nf = len(f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ",atonce
f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
offsets = offsets
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
#offsets = offsets*4
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
print f0s
print offsets*1e6
print len(f0s)
if False:
from kid_readout.utils.parse_srs import get_all_temperature_data
while True:
temp = get_all_temperature_data()[1][-1]
print "mk stage at", temp
if temp > 0.348:
break
time.sleep(300)
time.sleep(600)
start = time.time()
use_fmin = True
attenlist = np.linspace(33,45,5)-9
#attenlist = [44.0]
#attenlist = attenlist[:4]
for atten in attenlist:
print "setting attenuator to",atten
ri.set_dac_attenuator(atten)
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded waveforms in", (time.time()-start),"seconds"
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
orig_sweep_data = sweep_data
meas_cfs = []
idxs = []
delays = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
delay = res.delay
delays.append(delay)
s21 = s21*np.exp(2j*np.pi*res.delay*fr)
res = fit_best_resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
delay = np.median(delays)
print "median delay is ",delay
nsamp = 2**20
step = 1
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])#np.arange(-4,4)*step
offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
offsets = offset_bins*512.0/nsamp
meas_cfs = np.array(meas_cfs)
f0binned = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
f0s = f0binned
measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
print "loaded updated waveforms in", (time.time()-start),"seconds"
sys.stdout.flush()
time.sleep(1)
df = data_file.DataFile(suffix=suffix)
df.log_hw_state(ri)
sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8, sweep_data=orig_sweep_data)
df.add_sweep(sweep_data)
meas_cfs = []
idxs = []
for m in range(len(f0s)):
fr,s21,errors = sweep_data.select_by_freq(f0s[m])
thiscf = f0s[m]
s21 = s21*np.exp(2j*np.pi*delay*fr)
res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
fmin = fr[np.abs(s21).argmin()]
print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
if use_fmin:
meas_cfs.append(fmin)
else:
if abs(res.f_0 - thiscf) > 0.1:
if abs(fmin - thiscf) > 0.1:
print "using original guess"
meas_cfs.append(thiscf)
else:
print "using fmin"
meas_cfs.append(fmin)
else:
print "using this fit"
meas_cfs.append(res.f_0)
idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
idxs.append(idx)
print meas_cfs
ri.add_tone_freqs(np.array(meas_cfs))
ri.select_bank(ri.tone_bins.shape[0]-1)
ri._sync()
time.sleep(0.5)
raw_input("turn on LED take data")
df.log_hw_state(ri)
nsets = len(meas_cfs)/atonce
tsg = None
for iset in range(nsets):
selection = range(len(meas_cfs))[iset::nsets]
ri.select_fft_bins(selection)
ri._sync()
time.sleep(0.2)
t0 = time.time()
dmod,addr = ri.get_data_seconds(30,demod=True)
print nsets,iset,tsg
tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
df.sync()
df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
|
bsd-2-clause
| -5,829,926,809,568,526,000
| 32.105556
| 114
| 0.606813
| false
| 2.737253
| false
| false
| false
|
audy/banana
|
banana.py
|
1
|
4205
|
#!/usr/bin/env python
# YAM SPLIT - Austin G. Davis-Richardson
# Splits barcoded, 3-paired illumina files based on a .yaml config file
import sys
import os
from glob import glob
import string
try:
import yaml
except ImportError:
print >> sys.stderr, "could not import yaml\ntry:\n sudo easy_install pyyaml"
quit(1)
# PARSE ARGUMENTS
try:
config_file = sys.argv[1]
reads_directory = sys.argv[2]
output_directory = sys.argv[3]
except IndexError:
    print >> sys.stderr, "usage: %s <config.yaml> <reads_directory/> <output_directory/>" %\
        sys.argv[0]
    quit(1)
# Parse YAML file
config = yaml.load(open(config_file))
# Make Output Directories
try:
os.mkdir(output_directory)
except OSError:
print >> sys.stderr, "%s exists! Delete or move." % output_directory
quit()
for lane in config['lanes']:
for experiment in config['lanes'][lane]:
try:
os.mkdir('%s/%s' % (output_directory, experiment))
except OSError:
continue
# DEFINE HOW FILES LOOK
FILENAME = "s_%(lane)s_%(mate)s_%(number)s_qseq.txt"
RANGE = range(1, 121) # Number goes FROM 0 TO 120
# For reverse complementing the barcode sequence
COMPLEMENT = string.maketrans('GATCRYgatcry', 'CTAGYRctagyr')
# Test reverse complement
assert 'GATCRYgatcry'.translate(COMPLEMENT) == 'CTAGYRctagyr'
# Load Barcodes
for key in config['barcodes']:
print "%s => %s barcodes" % (key, len(config['barcodes'][key]))
# FOR LANE IN LANES
for lane in config['lanes']:
print 'Lane: %s' % lane
# FOR EXP IN LANE.EXPERIMENTS
for experiment in config['lanes'][lane]:
# Load BARCODES
barcode_type = config['lanes'][lane][experiment]['barcodes']
barcode_range = config['lanes'][lane][experiment]['range']
# check if range or individual barcodes specified
if '-' in barcode_range:
start, stop = config['lanes'][lane][experiment]['range'].split('-')
start, stop = int(start), int(stop) + 1
barcode_range = range(start, stop)
print '\t%s (%s, %s-%s)' % (experiment, barcode_type, start, stop),
else:
barcode_range = barcode_range.split()
print '\t%s (%s, %s)' % (experiment, barcode_type, ','.join(barcode_range))
to_keep = dict( (v, k) for k, v in config['barcodes'][barcode_type].items() if k in barcode_range )
# Get which lines to keep:
kept, thrown_away = 0, 0
for file_no in RANGE:
line_to_barcode = {}
filename = '%s/%s' % (reads_directory, FILENAME % {
'lane': lane,
'mate': 2,
'number': '%04d' % file_no })
with open(filename) as handle:
print filename
for n, line in enumerate(handle):
barcode = line.split('\t')[8][::-1].translate(COMPLEMENT)
if barcode in to_keep.keys():
line_to_barcode[n] = to_keep[barcode]
kept += 1
else:
thrown_away += 1
print len(line_to_barcode)
# Output reads.
for mate in [1, 3]:
# MAKE HANDLES:
handles = dict(
(barcode,
open('%s/%s/%s' % (output_directory, experiment,
'IL5_L_%s_B_%03d_%s.txt' % (
lane, barcode, mate
)), 'a')) for barcode in to_keep.values()
)
# Read Raw Reads, splitting
infilename = '%s/%s' % (reads_directory, FILENAME % {
'lane': lane,
'mate': mate,
'number': '%04d' % file_no })
with open(infilename) as handle:
for n, line in enumerate(handle):
if n in line_to_barcode:
barcode = line_to_barcode[n]
print >> handles[barcode], line.strip()
del handles # Garbage Collector can't keep up
|
mit
| -1,384,405,166,370,904,000
| 31.099237
| 107
| 0.524614
| false
| 3.82969
| true
| false
| false
|
barrachri/epcon
|
assopy/views.py
|
1
|
23911
|
# -*- coding: UTF-8 -*-
from django import forms
from django import http
from django.conf import settings as dsettings
from django.contrib import auth
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.util import unquote
from django.core.urlresolvers import reverse
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from assopy import forms as aforms
from assopy import janrain
from assopy import models
from assopy import settings
from assopy import utils as autils
if settings.GENRO_BACKEND:
from assopy.clients import genro
from email_template import utils
import json
import logging
import urllib
from datetime import datetime
log = logging.getLogger('assopy.views')
class HttpResponseRedirectSeeOther(http.HttpResponseRedirect):
status_code = 303
def __init__(self, url):
if not url.startswith('http'):
url = dsettings.DEFAULT_URL_PREFIX + url
super(HttpResponseRedirectSeeOther, self).__init__(url)
# see: http://www.djangosnippets.org/snippets/821/
def render_to(template):
"""
Decorator for Django views that sends returned dict to render_to_response function
with given template and RequestContext as context instance.
If view doesn't return dict then decorator simply returns output.
Additionally view can return two-tuple, which must contain dict as first
element and string with template name as second. This string will
override template name, given as parameter
Parameters:
- template: template name to use
"""
def renderer(func):
def wrapper(request, *args, **kw):
output = func(request, *args, **kw)
if isinstance(output, (list, tuple)):
return render_to_response(output[1], output[0], RequestContext(request))
elif isinstance(output, dict):
return render_to_response(template, output, RequestContext(request))
return output
return wrapper
return renderer
def render_to_json(f):
from conference.views import json_dumps
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json_dumps(d, indent=2)
else:
ct = 'application/json'
j = json_dumps
def wrapper(*args, **kw):
try:
result = f(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, http.HttpResponse):
return result
else:
from django.forms.util import ErrorDict
status = 200 if not isinstance(result, ErrorDict) else 400
result = j(result)
return http.HttpResponse(content=result, content_type=ct, status=status)
return wrapper
@login_required
@render_to('assopy/profile.html')
def profile(request):
user = request.user.assopy_user
if request.method == 'POST':
form = aforms.Profile(data=request.POST, files=request.FILES, instance=user)
if form.is_valid():
form.save()
messages.info(request, 'Profile updated')
return HttpResponseRedirectSeeOther('.')
else:
form = aforms.Profile(instance=user)
return {
'user': user,
'form': form,
}
@login_required
def profile_identities(request):
if request.method == 'POST':
try:
x = request.user.assopy_user.identities.get(identifier=request.POST['identifier'])
except:
return http.HttpResponseBadRequest()
log.info(
'Removed the identity "%s" from the user "%s" "%s"',
x.identifier,
x.user.name(),
x.user.user.email)
x.delete()
if request.is_ajax():
return http.HttpResponse('')
else:
return HttpResponseRedirectSeeOther(reverse('assopy-profile'))
@login_required
@render_to('assopy/billing.html')
def billing(request, order_id=None):
user = request.user.assopy_user
if request.method == 'POST':
form = aforms.BillingData(data=request.POST, files=request.FILES, instance=user)
if form.is_valid():
form.save()
return HttpResponseRedirectSeeOther('.')
else:
form = aforms.BillingData(instance=user)
return {
'user': user,
'form': form,
}
@render_to('assopy/new_account.html')
def new_account(request):
if request.user.is_authenticated():
return redirect('assopy-profile')
if request.method == 'GET':
form = aforms.NewAccountForm()
else:
form = aforms.NewAccountForm(data=request.POST)
if form.is_valid():
data = form.cleaned_data
user = models.User.objects.create_user(
email=data['email'],
first_name=data['first_name'],
last_name=data['last_name'],
password=data['password1'],
)
request.session['new-account-user'] = user.pk
return HttpResponseRedirectSeeOther(reverse('assopy-new-account-feedback'))
return {
'form': form,
'next': request.GET.get('next', '/'),
}
@render_to('assopy/new_account_feedback.html')
def new_account_feedback(request):
try:
user = models.User.objects.get(pk=request.session['new-account-user'])
except KeyError:
return redirect('/')
except models.User.DoesNotExist:
user = None
return {
'u': user,
}
def OTCHandler_V(request, token):
auth.logout(request)
user = token.user
user.is_active = True
user.save()
user = auth.authenticate(uid=user.id)
auth.login(request, user)
return redirect('assopy-profile')
def OTCHandler_J(request, token):
payload = json.loads(token.payload)
email = payload['email']
profile = payload['profile']
log.info('"%s" verified; link to "%s"', email, profile['identifier'])
identity = _linkProfileToEmail(email, profile)
duser = auth.authenticate(identifier=identity.identifier)
auth.login(request, duser)
return redirect('assopy-profile')
def otc_code(request, token):
t = models.Token.objects.retrieve(token)
if t is None:
raise http.Http404()
from assopy.utils import dotted_import
try:
path = settings.OTC_CODE_HANDLERS[t.ctype]
except KeyError:
return http.HttpResponseBadRequest()
return dotted_import(path)(request, t)
def _linkProfileToEmail(email, profile):
try:
current = autils.get_user_account_from_email(email)
except auth.models.User.DoesNotExist:
current = auth.models.User.objects.create_user(janrain.suggest_username(profile), email)
try:
current.first_name = profile['name']['givenName']
except KeyError:
pass
try:
current.last_name = profile['name']['familyName']
except KeyError:
pass
current.is_active = True
current.save()
log.debug('new (active) django user created "%s"', current)
else:
log.debug('django user found "%s"', current)
try:
        # if current was found among the local users, the assopy
        # counterpart may also exist
user = current.assopy_user
except models.User.DoesNotExist:
log.debug('the current user "%s" will become an assopy user', current)
user = models.User(user=current)
user.save()
log.debug('a new identity (for "%s") will be linked to "%s"', profile['identifier'], current)
identity = models.UserIdentity.objects.create_from_profile(user, profile)
return identity
@csrf_exempt
def janrain_token(request):
if request.method != 'POST':
return http.HttpResponseNotAllowed(('POST',))
redirect_to = request.session.get('jr_next', reverse('assopy-profile'))
try:
token = request.POST['token']
except KeyError:
return http.HttpResponseBadRequest()
try:
profile = janrain.auth_info(settings.JANRAIN['secret'], token)
except Exception, e:
log.warn('exception during janrain auth info: "%s"', str(e))
return HttpResponseRedirectSeeOther(dsettings.LOGIN_URL)
log.info('janrain profile from %s: %s', profile['providerName'], profile['identifier'])
current = request.user
duser = auth.authenticate(identifier=profile['identifier'])
if duser is None:
log.info('%s is a new identity', profile['identifier'])
        # this is the first time this user has logged in with this provider
if not current.is_anonymous():
verifiedEmail = current.email
else:
            # we need to create everything: the django user, the assopy user and the identity
if not 'verifiedEmail' in profile:
                # argh, the chosen provider does not give us a verified email;
                # to prevent account theft we cannot activate the account. We
                # have to ask the user for a valid email and send a
                # confirmation link to that address.
log.info('janrain profile without a verified email')
request.session['incomplete-profile'] = profile
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-incomplete-profile'))
else:
verifiedEmail = profile['verifiedEmail']
log.info('janrain profile with a verified email "%s"', verifiedEmail)
identity = _linkProfileToEmail(verifiedEmail, profile)
duser = auth.authenticate(identifier=identity.identifier)
auth.login(request, duser)
else:
        # this is a known identity; we only need to check that the user linked
        # to the identity and the one currently logged in are the same person
if current.is_anonymous():
            # ok, all we have to do is log in as the user linked to the identity
auth.login(request, duser)
elif current != duser:
            # the current user and the one linked to the identity do not match;
            # we have to show an error message
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-login_mismatch'))
else:
            # nothing to do, the user is already logged in
pass
return HttpResponseRedirectSeeOther(redirect_to)
@render_to('assopy/janrain_incomplete_profile.html')
def janrain_incomplete_profile(request):
p = request.session['incomplete-profile']
try:
name = p['displayName']
except KeyError:
name = '%s %s' % (p['name'].get('givenName', ''), p['name'].get('familyName', ''))
class Form(forms.Form):
email = forms.EmailField()
if request.method == 'POST':
form = Form(data=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
payload = {
'email': email,
'profile': p,
}
token = models.Token.objects.create(ctype='j', payload=json.dumps(payload))
current = autils.get_user_account_from_email(email, default=None)
utils.email(
'janrain-incomplete',
ctx={
'name': name,
'provider': p['providerName'],
'token': token,
'current': current,
},
to=[email]
).send()
del request.session['incomplete-profile']
return HttpResponseRedirectSeeOther(reverse('assopy-janrain-incomplete-profile-feedback'))
else:
form = Form()
return {
'provider': p['providerName'],
'name': name,
'form': form,
}
@render_to('assopy/janrain_incomplete_profile_feedback.html')
def janrain_incomplete_profile_feedback(request):
return {}
@render_to('assopy/janrain_login_mismatch.html')
def janrain_login_mismatch(request):
return {}
@render_to('assopy/checkout.html')
def checkout(request):
if request.method == 'POST':
if not request.user.is_authenticated():
return http.HttpResponseBadRequest('unauthorized')
form = aforms.FormTickets(data=request.POST)
if form.is_valid():
data = form.cleaned_data
o = models.Order.objects.create(user=request.user.assopy_user, payment=data['payment'], items=data['tickets'])
if o.payment_url:
return HttpResponseRedirectSeeOther(o.payment_url)
else:
return HttpResponseRedirectSeeOther(reverse('assopy-tickets'))
else:
form = aforms.FormTickets()
return {
'form': form,
}
@login_required
@render_to('assopy/tickets.html')
def tickets(request):
if settings.TICKET_PAGE:
return redirect(settings.TICKET_PAGE)
return {}
@login_required
@render_to_json
def geocode(request):
address = request.GET.get('address', '').strip()
region = request.GET.get('region')
if not address:
return ''
from assopy.utils import geocode as g
return g(address, region=region)
def paypal_billing(request, code):
    # this view just performs the redirect to paypal
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.total() == 0:
o.confirm_order(datetime.now())
return HttpResponseRedirectSeeOther(reverse('assopy-paypal-feedback-ok', kwargs={'code': code}))
form = aforms.PayPalForm(o)
return HttpResponseRedirectSeeOther("%s?%s" % (form.paypal_url(), form.as_url_args()))
def paypal_cc_billing(request, code):
    # this view performs the redirect to paypal and adds the info
    # needed for credit-card billing
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.total() == 0:
o.confirm_order(datetime.now())
return HttpResponseRedirectSeeOther(reverse('assopy-paypal-feedback-ok', kwargs={'code': code}))
form = aforms.PayPalForm(o)
cc_data = {
"address_override" : 0,
"no_shipping" : 1,
"email": o.user.user.email,
"first_name" : o.card_name,
"last_name": "",
"address1": o.address,
#"zip": o.zip_code,
#"state": o.state,
"country": o.country,
"address_name": o.card_name,
}
qparms = urllib.urlencode([ (k,x.encode('utf-8') if isinstance(x, unicode) else x) for k,x in cc_data.items() ])
return HttpResponseRedirectSeeOther(
"%s?%s&%s" % (
form.paypal_url(),
form.as_url_args(),
qparms
)
)
@render_to('assopy/paypal_cancel.html')
def paypal_cancel(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
form = aforms.PayPalForm(o)
return {'form': form }
# it seems that sometimes the paypal redirect ends with a POST from the
# browser (did somebody say HttpResponseRedirectSeeOther?); since nothing
# dangerous is executed here we skip the csrf check
@csrf_exempt
@render_to('assopy/paypal_feedback_ok.html')
def paypal_feedback_ok(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.user.user != request.user or o.method not in ('paypal', 'cc'):
raise http.Http404()
    # wait a bit to give Paypal time to send us the IPN notification
from time import sleep
sleep(0.4)
return {
'order': o,
}
@login_required
@render_to('assopy/bank_feedback_ok.html')
def bank_feedback_ok(request, code):
o = get_object_or_404(models.Order, code=code.replace('-', '/'))
if o.user.user != request.user or o.method != 'bank':
raise http.Http404()
return {
'order': o,
}
@login_required
def invoice(request, order_code, code, mode='html'):
if not request.user.is_staff:
userfilter = {
'order__user__user': request.user,
}
else:
userfilter = {}
invoice = get_object_or_404(
models.Invoice,
code=unquote(code),
order__code=unquote(order_code),
**userfilter
)
if mode == 'html':
order = invoice.order
address = '%s, %s' % (order.address, unicode(order.country))
ctx = {
'document': ('Fattura N.', 'Invoice N.'),
'title': unicode(invoice),
'code': invoice.code,
'emit_date': invoice.emit_date,
'order': {
'card_name': order.card_name,
'address': address,
'billing_notes': order.billing_notes,
'cf_code': order.cf_code,
'vat_number': order.vat_number,
},
'items': invoice.invoice_items(),
'note': invoice.note,
'price': {
'net': invoice.net_price(),
'vat': invoice.vat_value(),
'total': invoice.price,
},
'vat': invoice.vat,
'real': settings.IS_REAL_INVOICE(invoice.code),
}
return render_to_response('assopy/invoice.html', ctx, RequestContext(request))
else:
if settings.GENRO_BACKEND:
assopy_id = invoice.assopy_id
data = genro.invoice(assopy_id)
if data.get('credit_note'):
order = get_object_or_404(models.Order, invoices__credit_notes__assopy_id=assopy_id)
else:
order = get_object_or_404(models.Order, assopy_id=data['order_id'])
raw = urllib.urlopen(genro.invoice_url(assopy_id))
else:
hurl = reverse('assopy-invoice-html', args=(order_code, code))
if not settings.WKHTMLTOPDF_PATH:
return HttpResponseRedirectSeeOther(hurl)
raw = _pdf(request, hurl)
order = invoice.order
from conference.models import Conference
try:
conf = Conference.objects\
.get(conference_start__year=order.created.year).code
except Conference.DoesNotExist:
conf = order.created.year
fname = '[%s invoice] %s.pdf' % (conf, invoice.code.replace('/', '-'))
response = http.HttpResponse(raw, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
def _pdf(request, url):
import subprocess
command_args = [
settings.WKHTMLTOPDF_PATH,
'--cookie',
dsettings.SESSION_COOKIE_NAME,
request.COOKIES.get(dsettings.SESSION_COOKIE_NAME),
'--zoom',
'1.3',
"%s%s" % (dsettings.DEFAULT_URL_PREFIX, url),
'-'
]
#print command_args
popen = subprocess.Popen(
command_args,
bufsize=4096,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
raw, _ = popen.communicate()
#print raw
return raw
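# Roughly, the subprocess call in _pdf is equivalent to the command line
#   wkhtmltopdf --cookie <SESSION_COOKIE_NAME> <session value> --zoom 1.3 <prefix+url> -
# where the trailing '-' makes wkhtmltopdf write the PDF to stdout, which is
# returned here as `raw`.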
@login_required
def credit_note(request, order_code, code, mode='html'):
if not request.user.is_staff:
userfilter = { 'invoice__order__user__user': request.user, }
else:
userfilter = {}
try:
cnote = models.CreditNote.objects\
.select_related('invoice__order')\
.get(code=unquote(code), invoice__order__code=unquote(order_code), **userfilter)
except models.CreditNote.DoesNotExist:
raise http.Http404()
order = cnote.invoice.order
if mode == 'html':
address = '%s, %s' % (order.address, unicode(order.country))
items = cnote.note_items()
for x in items:
x['price'] = x['price'] * -1
invoice = cnote.invoice
rif = invoice.code
if invoice.payment_date:
rif = '%s - %s' % (rif, invoice.payment_date.strftime('%d %b %Y'))
note = 'Nota di credito / Credit Note <b>Rif: %s</b>' % rif
ctx = {
'document': ('Nota di credito', 'Credit note'),
'title': unicode(cnote),
'code': cnote.code,
'emit_date': cnote.emit_date,
'order': {
'card_name': order.card_name,
'address': address,
'billing_notes': order.billing_notes,
'cf_code': order.cf_code,
'vat_number': order.vat_number,
},
'items': items,
'note': note,
'price': {
'net': cnote.net_price() * -1,
'vat': cnote.vat_value() * -1,
'total': cnote.price * -1,
},
'vat': cnote.invoice.vat,
'real': True,
}
return render_to_response('assopy/invoice.html', ctx, RequestContext(request))
else:
hurl = reverse('assopy-credit_note-html', args=(order_code, code))
if not settings.WKHTMLTOPDF_PATH:
print "NO WKHTMLTOPDF_PATH SET"
return HttpResponseRedirectSeeOther(hurl)
raw = _pdf(request, hurl)
from conference.models import Conference
try:
conf = Conference.objects\
.get(conference_start__year=order.created.year).code
except Conference.DoesNotExist:
conf = order.created.year
fname = '[%s credit note] %s.pdf' % (conf, cnote.code.replace('/', '-'))
response = http.HttpResponse(raw, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s"' % fname
return response
@login_required
@render_to('assopy/voucher.html')
def voucher(request, order_id, item_id):
item = get_object_or_404(models.OrderItem, order=order_id, id=item_id)
if (not item.ticket or item.ticket.fare.payment_type != 'v' or item.order.user.user != request.user) and not request.user.is_superuser:
raise http.Http404()
return {
'item': item,
}
@csrf_exempt
def order_complete(request, assopy_id):
if request.method != 'POST':
return http.HttpResponseNotAllowed(('POST',))
order = get_object_or_404(models.Order, assopy_id=assopy_id)
r = order.complete()
log.info('remote notice! order "%s" (%s) complete! result=%s', order.code, order.assopy_id, r)
return http.HttpResponse('')
@login_required
@render_to_json
def refund(request, order_id, item_id):
try:
item = models.OrderItem.objects\
.select_related('order')\
.get(order=order_id, id=item_id)
except models.OrderItem.DoesNotExist:
raise http.Http404()
try:
r = models.RefundOrderItem.objects.select_related('refund').get(orderitem=item_id)
if r.refund.status == 'rejected':
r = None
except models.RefundOrderItem.DoesNotExist:
r = None
if request.method == 'POST':
if r:
return http.HttpResponseBadRequest()
try:
d = request.session['doppelganger']
except KeyError:
user = request.user
else:
from django.contrib.auth.models import User
user = User.objects.get(id=d[0])
if not settings.ORDERITEM_CAN_BE_REFUNDED(user, item):
return http.HttpResponseBadRequest()
form = aforms.RefundItemForm(item, data=request.POST)
if not form.is_valid():
return form.errors
data = form.cleaned_data
note = ''
if data['paypal'] or data['bank']:
if data['paypal']:
note += 'paypal: %s\n' % data['paypal']
if data['bank']:
note += 'bank routing: %s\n' % data['bank']
note += '----------------------------------------\n'
r = models.Refund.objects.create_from_orderitem(
item, reason=data['reason'], internal_note=note)
if not r:
return None
return {
'status': r.status,
}
|
bsd-2-clause
| -1,515,016,967,348,325,000
| 34.253687
| 139
| 0.602251
| false
| 3.831063
| false
| false
| false
|
ahjulstad/mathdom-python3
|
mathml/utils/sax_pmathml.py
|
1
|
1682
|
from mathml.pmathml.element import *
from mathml.pmathml.mtoken import MToken
import xml.sax.handler
class MathMLHandler(xml.sax.handler.ContentHandler):
class Elem(object):
__slots__ = ('parent', 'name', 'attributes', 'text', 'children')
def __init__(self, plotter):
self.plotter = plotter
self.current = self.Elem()
self.current.children = []
def characters(self, content):
self.current.text += content
def startElementNS(self, xxx_todo_changeme, qname, attrs):
(ns, name) = xxx_todo_changeme
elem = self.Elem()
elem.parent = self.current
elem.parent.children.append(elem)
elem.text = ''
elem.attributes = {}
for key, value in list(attrs.items()):
elem.attributes[key] = value
elem.children = []
elem.name = name
self.current = elem
def endElementNS(self, xxx_todo_changeme1, qname):
(ns, name) = xxx_todo_changeme1
self.current = self.current.parent
def __buildTreeRecursive(self, node):
klass = xml_mapping[node.name]
if issubclass(klass, MToken):
element = klass(self.plotter, node.text.strip())
else:
children = list(map(self.__buildTreeRecursive, node.children))
element = klass(self.plotter, children)
for name, value in list(node.attributes.items()):
element.setAttribute(name, value)
return element
def buildTree(self):
assert len(self.current.children) == 1
elem = self.__buildTreeRecursive(self.current.children[0])
del self.current
return elem
def buildFromPMathml(etree, plotter):
handler = MathMLHandler(plotter)
etree.saxify(handler)
return handler.buildTree()
def buildFromMathDOM(mathdom, plotter):
return buildFromPMathml(mathdom.to_pmathml(), plotter)
|
mit
| 2,438,675,304,666,861,600
| 28
| 67
| 0.709275
| false
| 3.209924
| false
| false
| false
|
linii/ling229-final
|
metrics/noun_cluster_plot.py
|
1
|
2306
|
#!/usr/bin/python
import os
import sys
# hack to make this able to import topic_modeling. must be run from final_project/ dir
lib_path = os.path.abspath(os.path.join('.'))
sys.path.append(lib_path)
import numpy as np
import pylab
import pickle
from collections import Counter
from topic_modeling.topic_model import preprocess_docs
from pca_plot import reduce_dim
def doc_to_noun_bow(doc_nouns, top_nouns):
top_nouns_set = set(top_nouns)
matching_words = filter(lambda word: word in top_nouns_set, doc_nouns)
counts = Counter(matching_words)
return np.array([counts[noun]/float(len(doc_nouns) + 1) for noun in top_nouns])
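# Worked example (illustrative): with top_nouns = ['love', 'time'] and
# doc_nouns = ['love', 'love', 'time', 'day'], counts are {love: 2, time: 1}
# and len(doc_nouns) + 1 = 5, so doc_to_noun_bow returns array([0.4, 0.2]).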
def get_most_freq_words(docs, n=100):
all_words = reduce(lambda x, y: x + y, docs)
word_counts = Counter(all_words)
return [word for word, count in word_counts.most_common(n)]
def get_and_save_word_counts(docs, vocab, outfile="metrics/word_counts.csv"):
word_counts_by_doc = np.vstack([doc_to_noun_bow(doc, vocab) for doc in docs])
np.savetxt(outfile, word_counts_by_doc, delimiter=",")
return word_counts_by_doc
def plot_pca_noun_data(noun_counts_by_doc, labels, outfile):
colors = ["green" if label else "red" for label in labels]
reduced_data = reduce_dim(noun_counts_by_doc, 2)
pylab.scatter(reduced_data[:, 0], reduced_data[:, 1], c=colors)
# pylab.ylim(-10, 10)
# pylab.xlim(-10, 10)
pylab.ylabel("Count Data Principal Component 2")
pylab.xlabel("Count Data Principal Component 1")
pylab.title("Word Count Data Plotted By PCA: Nonromantic Lexicon Words")
pylab.savefig(outfile)
pylab.show()
if __name__ == '__main__':
label_file = sys.argv[3]
png_out_file = sys.argv[4]
if sys.argv[1] == "-pos":
postags_file = sys.argv[2]
doc_nouns = preprocess_docs(doc_texts=None, postags_file=postags_file)
noun_counts = get_and_save_word_counts(doc_nouns, get_most_freq_words(doc_nouns, 100))
elif sys.argv[1] == "-csv":
csv_file = sys.argv[2]
noun_counts = np.loadtxt(csv_file, dtype=int, delimiter=",")
labels = np.loadtxt(label_file, dtype=int, delimiter=",")
labels = labels[:np.shape(noun_counts)[0]]
plot_pca_noun_data(noun_counts, labels, png_out_file)
|
gpl-3.0
| 5,005,873,616,967,561,000
| 32.447761
| 94
| 0.656548
| false
| 3.030223
| false
| false
| false
|
zeldin/libsigrokdecode
|
decoders/lpc/pd.py
|
1
|
13089
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2013 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
# ...
fields = {
# START field (indicates start or stop of a transaction)
'START': {
0b0000: 'Start of cycle for a target',
0b0001: 'Reserved',
0b0010: 'Grant for bus master 0',
0b0011: 'Grant for bus master 1',
0b0100: 'Reserved',
0b0101: 'Reserved',
0b0110: 'Reserved',
0b0111: 'Reserved',
0b1000: 'Reserved',
0b1001: 'Reserved',
0b1010: 'Reserved',
0b1011: 'Reserved',
0b1100: 'Reserved',
0b1101: 'Start of cycle for a Firmware Memory Read cycle',
0b1110: 'Start of cycle for a Firmware Memory Write cycle',
0b1111: 'Stop/abort (end of a cycle for a target)',
},
# Cycle type / direction field
# Bit 0 (LAD[0]) is unused, should always be 0.
# Neither host nor peripheral are allowed to drive 0b11x0.
'CT_DR': {
0b0000: 'I/O read',
0b0010: 'I/O write',
0b0100: 'Memory read',
0b0110: 'Memory write',
0b1000: 'DMA read',
0b1010: 'DMA write',
0b1100: 'Reserved / not allowed',
0b1110: 'Reserved / not allowed',
},
# SIZE field (determines how many bytes are to be transferred)
# Bits[3:2] are reserved, must be driven to 0b00.
# Neither host nor peripheral are allowed to drive 0b0010.
'SIZE': {
0b0000: '8 bits (1 byte)',
0b0001: '16 bits (2 bytes)',
0b0010: 'Reserved / not allowed',
0b0011: '32 bits (4 bytes)',
},
# CHANNEL field (bits[2:0] contain the DMA channel number)
'CHANNEL': {
0b0000: '0',
0b0001: '1',
0b0010: '2',
0b0011: '3',
0b0100: '4',
0b0101: '5',
0b0110: '6',
0b0111: '7',
},
# SYNC field (used to add wait states)
'SYNC': {
0b0000: 'Ready',
0b0001: 'Reserved',
0b0010: 'Reserved',
0b0011: 'Reserved',
0b0100: 'Reserved',
0b0101: 'Short wait',
0b0110: 'Long wait',
0b0111: 'Reserved',
0b1000: 'Reserved',
0b1001: 'Ready more (DMA only)',
0b1010: 'Error',
0b1011: 'Reserved',
0b1100: 'Reserved',
0b1101: 'Reserved',
0b1110: 'Reserved',
0b1111: 'Reserved',
},
}
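# Illustrative lookups into the tables above (values taken from the dicts):
#   fields['CT_DR'][0b0010]   # -> 'I/O write'
#   fields['SYNC'][0b0101]    # -> 'Short wait'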
class Decoder(srd.Decoder):
api_version = 2
id = 'lpc'
name = 'LPC'
longname = 'Low-Pin-Count'
desc = 'Protocol for low-bandwidth devices on PC mainboards.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['lpc']
channels = (
{'id': 'lframe', 'name': 'LFRAME#', 'desc': 'Frame'},
{'id': 'lclk', 'name': 'LCLK', 'desc': 'Clock'},
{'id': 'lad0', 'name': 'LAD[0]', 'desc': 'Addr/control/data 0'},
{'id': 'lad1', 'name': 'LAD[1]', 'desc': 'Addr/control/data 1'},
{'id': 'lad2', 'name': 'LAD[2]', 'desc': 'Addr/control/data 2'},
{'id': 'lad3', 'name': 'LAD[3]', 'desc': 'Addr/control/data 3'},
)
optional_channels = (
{'id': 'lreset', 'name': 'LRESET#', 'desc': 'Reset'},
{'id': 'ldrq', 'name': 'LDRQ#', 'desc': 'Encoded DMA / bus master request'},
{'id': 'serirq', 'name': 'SERIRQ', 'desc': 'Serialized IRQ'},
{'id': 'clkrun', 'name': 'CLKRUN#', 'desc': 'Clock run'},
{'id': 'lpme', 'name': 'LPME#', 'desc': 'LPC power management event'},
{'id': 'lpcpd', 'name': 'LPCPD#', 'desc': 'Power down'},
{'id': 'lsmi', 'name': 'LSMI#', 'desc': 'System Management Interrupt'},
)
annotations = (
('warnings', 'Warnings'),
('start', 'Start'),
('cycle-type', 'Cycle-type/direction'),
('addr', 'Address'),
('tar1', 'Turn-around cycle 1'),
('sync', 'Sync'),
('data', 'Data'),
('tar2', 'Turn-around cycle 2'),
)
annotation_rows = (
('data', 'Data', (1, 2, 3, 4, 5, 6, 7)),
('warnings', 'Warnings', (0,)),
)
def __init__(self):
self.state = 'IDLE'
self.oldlclk = -1
self.samplenum = 0
self.clocknum = 0
self.lad = -1
self.addr = 0
self.cur_nibble = 0
self.cycle_type = -1
self.databyte = 0
self.tarcount = 0
self.synccount = 0
self.oldpins = None
self.ss_block = self.es_block = None
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putb(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def handle_get_start(self, lad, lad_bits, lframe):
# LAD[3:0]: START field (1 clock cycle).
# The last value of LAD[3:0] before LFRAME# gets de-asserted is what
# the peripherals must use. However, the host can keep LFRAME# asserted
# multiple clocks, and we output all START fields that occur, even
# though the peripherals are supposed to ignore all but the last one.
self.es_block = self.samplenum
self.putb([1, [fields['START'][lad], 'START', 'St', 'S']])
self.ss_block = self.samplenum
# Output a warning if LAD[3:0] changes while LFRAME# is low.
# TODO
if (self.lad != -1 and self.lad != lad):
self.putb([0, ['LAD[3:0] changed while LFRAME# was asserted']])
# LFRAME# is asserted (low). Wait until it gets de-asserted again
# (the host is allowed to keep it asserted multiple clocks).
if lframe != 1:
return
self.start_field = self.lad
self.state = 'GET CT/DR'
def handle_get_ct_dr(self, lad, lad_bits):
# LAD[3:0]: Cycle type / direction field (1 clock cycle).
self.cycle_type = fields['CT_DR'][lad]
# TODO: Warning/error on invalid cycle types.
if self.cycle_type == 'Reserved':
self.putb([0, ['Invalid cycle type (%s)' % lad_bits]])
self.es_block = self.samplenum
self.putb([2, ['Cycle type: %s' % self.cycle_type]])
self.ss_block = self.samplenum
self.state = 'GET ADDR'
self.addr = 0
self.cur_nibble = 0
def handle_get_addr(self, lad, lad_bits):
# LAD[3:0]: ADDR field (4/8/0 clock cycles).
# I/O cycles: 4 ADDR clocks. Memory cycles: 8 ADDR clocks.
# DMA cycles: no ADDR clocks at all.
if self.cycle_type in ('I/O read', 'I/O write'):
addr_nibbles = 4 # Address is 16bits.
elif self.cycle_type in ('Memory read', 'Memory write'):
addr_nibbles = 8 # Address is 32bits.
else:
addr_nibbles = 0 # TODO: How to handle later on?
# Addresses are driven MSN-first.
offset = ((addr_nibbles - 1) - self.cur_nibble) * 4
self.addr |= (lad << offset)
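        # Worked example (illustrative): for a 16-bit I/O address
        # (addr_nibbles = 4) driven MSN-first as 0x1, 0x2, 0x3, 0x4, the
        # offsets are 12, 8, 4 and 0, so self.addr ends up as 0x1234.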
# Continue if we haven't seen all ADDR cycles, yet.
if (self.cur_nibble < addr_nibbles - 1):
self.cur_nibble += 1
return
self.es_block = self.samplenum
s = 'Address: 0x%%0%dx' % addr_nibbles
self.putb([3, [s % self.addr]])
self.ss_block = self.samplenum
self.state = 'GET TAR'
self.tar_count = 0
def handle_get_tar(self, lad, lad_bits):
# LAD[3:0]: First TAR (turn-around) field (2 clock cycles).
self.es_block = self.samplenum
self.putb([4, ['TAR, cycle %d: %s' % (self.tarcount, lad_bits)]])
self.ss_block = self.samplenum
# On the first TAR clock cycle LAD[3:0] is driven to 1111 by
# either the host or peripheral. On the second clock cycle,
# the host or peripheral tri-states LAD[3:0], but its value
# should still be 1111, due to pull-ups on the LAD lines.
if lad_bits != '1111':
self.putb([0, ['TAR, cycle %d: %s (expected 1111)' % \
(self.tarcount, lad_bits)]])
if (self.tarcount != 1):
self.tarcount += 1
return
self.tarcount = 0
self.state = 'GET SYNC'
def handle_get_sync(self, lad, lad_bits):
# LAD[3:0]: SYNC field (1-n clock cycles).
self.sync_val = lad_bits
self.cycle_type = fields['SYNC'][lad]
# TODO: Warnings if reserved value are seen?
if self.cycle_type == 'Reserved':
self.putb([0, ['SYNC, cycle %d: %s (reserved value)' % \
(self.synccount, self.sync_val)]])
self.es_block = self.samplenum
self.putb([5, ['SYNC, cycle %d: %s' % (self.synccount, self.sync_val)]])
self.ss_block = self.samplenum
# TODO
self.cycle_count = 0
self.state = 'GET DATA'
def handle_get_data(self, lad, lad_bits):
# LAD[3:0]: DATA field (2 clock cycles).
# Data is driven LSN-first.
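        # Worked example (illustrative): if the two data nibbles are 0xA then
        # 0xB, the assembled byte is 0xA | (0xB << 4) = 0xBA.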
if (self.cycle_count == 0):
self.databyte = lad
elif (self.cycle_count == 1):
self.databyte |= (lad << 4)
else:
raise Exception('Invalid cycle_count: %d' % self.cycle_count)
if (self.cycle_count != 1):
self.cycle_count += 1
return
self.es_block = self.samplenum
self.putb([6, ['DATA: 0x%02x' % self.databyte]])
self.ss_block = self.samplenum
self.cycle_count = 0
self.state = 'GET TAR2'
def handle_get_tar2(self, lad, lad_bits):
# LAD[3:0]: Second TAR field (2 clock cycles).
self.es_block = self.samplenum
self.putb([7, ['TAR, cycle %d: %s' % (self.tarcount, lad_bits)]])
self.ss_block = self.samplenum
# On the first TAR clock cycle LAD[3:0] is driven to 1111 by
# either the host or peripheral. On the second clock cycle,
# the host or peripheral tri-states LAD[3:0], but its value
# should still be 1111, due to pull-ups on the LAD lines.
if lad_bits != '1111':
self.putb([0, ['Warning: TAR, cycle %d: %s (expected 1111)'
% (self.tarcount, lad_bits)]])
if (self.tarcount != 1):
self.tarcount += 1
return
self.tarcount = 0
self.state = 'IDLE'
def decode(self, ss, es, data):
for (self.samplenum, pins) in data:
# If none of the pins changed, there's nothing to do.
if self.oldpins == pins:
continue
# Store current pin values for the next round.
self.oldpins = pins
# Get individual pin values into local variables.
(lframe, lclk, lad0, lad1, lad2, lad3) = pins[:6]
(lreset, ldrq, serirq, clkrun, lpme, lpcpd, lsmi) = pins[6:]
# Only look at the signals upon rising LCLK edges. The LPC clock
# is the same as the PCI clock (which is sampled at rising edges).
if not (self.oldlclk == 0 and lclk == 1):
self.oldlclk = lclk
continue
# Store LAD[3:0] bit values (one nibble) in local variables.
# Most (but not all) states need this.
if self.state != 'IDLE':
lad = (lad3 << 3) | (lad2 << 2) | (lad1 << 1) | lad0
lad_bits = bin(lad)[2:].zfill(4)
# self.putb([0, ['LAD: %s' % lad_bits]])
# TODO: Only memory read/write is currently supported/tested.
# State machine
if self.state == 'IDLE':
# A valid LPC cycle starts with LFRAME# being asserted (low).
if lframe != 0:
continue
self.ss_block = self.samplenum
self.state = 'GET START'
self.lad = -1
# self.clocknum = 0
elif self.state == 'GET START':
self.handle_get_start(lad, lad_bits, lframe)
elif self.state == 'GET CT/DR':
self.handle_get_ct_dr(lad, lad_bits)
elif self.state == 'GET ADDR':
self.handle_get_addr(lad, lad_bits)
elif self.state == 'GET TAR':
self.handle_get_tar(lad, lad_bits)
elif self.state == 'GET SYNC':
self.handle_get_sync(lad, lad_bits)
elif self.state == 'GET DATA':
self.handle_get_data(lad, lad_bits)
elif self.state == 'GET TAR2':
self.handle_get_tar2(lad, lad_bits)
|
gpl-3.0
| 4,092,020,724,565,551,000
| 34.762295
| 88
| 0.53969
| false
| 3.297808
| false
| false
| false
|
Glottotopia/aagd
|
moin/local/moin/contrib/googleimport/driver.py
|
1
|
7520
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""
MoinMoin wiki project -> Google Project Hosting converter
Full of evil antipatterns, incl. Exception exceptions.
@copyright: 2007,2010 MoinMoin:AlexanderSchremmer
@license: GNU GPL, see COPYING for details.
"""
import sys
import re
import urllib2
from urllib import quote
import xmlrpclib
import csv
from MoinMoin.web.contexts import ScriptContext
from MoinMoin.Page import Page
# monkeypatch the formatter to avoid line_anchors:
from MoinMoin.formatter import text_html
text_html.line_anchors = False
request = ScriptContext(None, None)
class DataNotFoundException(Exception): pass
class Task(object):
def __init__(self, summary, desc, label, hours, mentors, difficulty, types):
self.summary = summary
self.label = label
self.hours = hours
self.mentors = mentors
self.difficulty = difficulty
self.types = types
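        # Render the wiki markup to HTML with MoinMoin's formatter, then flatten
        # newlines and strip the leftover line-anchor span classes from the output.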
page = Page(request, "")
page.set_raw_body(desc)
desc = request.redirectedOutput(page.send_page, content_only=1)
for s, r in [
('\n', ' '),
(' class="line862"', ''),
(' class="line867"', ''),
(' class="line874"', ''),
(' class="line891"', ''),
]:
desc = desc.replace(s, r)
self.desc = desc
def __repr__(self):
return (u"<Task summary=%r label=%r hours=%i mentors=%r difficulty=%r types=%r desc='''%s'''>" % (
self.summary, self.label, self.hours, self.mentors, self.difficulty,
self.types, self.desc[:100])).encode("utf-8")
def find_dict_entry(name, text):
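    # Parse a MoinMoin definition-list entry of the form " Name:: value", e.g.
    # find_dict_entry("Count", " Count:: 3") returns '3'.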
m = re.search(r"^ %s:: (.*)$" % (name, ), text, re.M | re.U)
if not m:
raise DataNotFoundException("%s not found" % (name, ))
return m.groups()[0]
desc_pattern = r"""= Description =
([\s\S]*?)
= Discussion ="""
bugpage_pattern = r"""= Description =
([\s\S]*?)
="""
already_pushed_pages = set([x.strip() for x in """
""".split("\n")])
already_pushed_bugs = set([x.strip() for x in """
""".split("\n")])
gatherers = []
def first(s):
""" return first word or '' """
splitted = s.strip().split()
if splitted:
return splitted[0]
else:
return ''
class Collector(object):
def is_gatherer(function):
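        # Decorator: record the function in the module-level 'gatherers' list so
        # collect_tasks() can call every registered gatherer. It works as a plain
        # function here because it is applied while the class body is executed.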
gatherers.append(function)
return function
def __init__(self, url):
self.url = url
self.server = xmlrpclib.ServerProxy(url + "?action=xmlrpc2")
def collect_tasks(self):
tasks = []
for gatherer in gatherers:
new = list(gatherer(self))
tasks.extend(new)
return tasks
@is_gatherer
def easytodo_pages(self):
pages = self.server.getAllPagesEx(dict(prefix="EasyToDo/"))
for page in pages:
if page in already_pushed_pages:
continue
page_contents = self.server.getPage(page)
try:
summary = find_dict_entry("Title", page_contents)
count = int(first(find_dict_entry("Count", page_contents)))
label = find_dict_entry("Tags", page_contents)
hours = int(first(find_dict_entry("Duration", page_contents)))
mentors = find_dict_entry("Mentors", page_contents)
difficulty = find_dict_entry("Difficulty", page_contents)
try:
types = find_dict_entry("Types", page_contents)
except DataNotFoundException:
# old tasks use "Type"
types = find_dict_entry("Type", page_contents)
except (DataNotFoundException, ValueError), e:
print >>sys.stderr, "Could not import %r because of %r" % (page, e)
continue
desc_m = re.search(desc_pattern, page_contents)
if not desc_m:
raise Exception("Could not import %r because Desc not found" % page)
desc = desc_m.groups()[0]
for i in range(1, count + 1):
text = desc
new_summary = summary
text += "\n\nYou can discuss this issue in the !MoinMoin wiki: %s" % (self.url + quote(page.encode("utf-8")), )
if count > 1:
text += "\n\nThis issue is available multiple times. This one is %i of %i." % (i, count)
new_summary += " %i/%i" % (i, count)
yield Task(new_summary, text, label, hours, mentors, difficulty, types)
#@is_gatherer
def moin_bugs(self):
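        # Note: this gatherer is currently disabled (its @is_gatherer decorator is
        # commented out) and the Task(...) call below passes fewer arguments than
        # Task.__init__ expects, so it would need updating before re-enabling.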
pages = [pagename for pagename, contents in self.server.searchPages(r"t:MoinMoinBugs/ r:CategoryEasy\b")]
for page in pages:
bug_name = page.replace("MoinMoinBugs/", "")
if bug_name in already_pushed_bugs:
continue
page_contents = self.server.getPage(page)
m = re.search(bugpage_pattern, page_contents)
if not m:
raise Exception("Could not import %r because of bug desc not found" % page)
desc = m.groups()[0]
desc = "A user filed a bug report at the MoinMoin site. Here is a short description about the issue. A more detailed description is available at the MoinMoin wiki: %s\n\n" % (self.url + quote(page.encode("utf-8")), ) + desc
yield Task(bug_name, desc, "Code")
#@is_gatherer
def translation_items(self):
#languages = self.server.getPage(u"EasyToDoTranslation/Languages").strip().splitlines()
#languages = ["Lithuanian (lt)"]
languages = []
for language in languages:
page = u"EasyToDoTranslation"
page_contents = self.server.getPage(page)
page_contents = page_contents.replace("LANG", language)
summary = find_dict_entry("Summary", page_contents)
count = int(first(find_dict_entry("Count", page_contents)))
desc_m = re.search(desc_pattern, page_contents)
if not desc_m:
raise Exception("Could not import %r because Desc not found" % page)
desc = desc_m.groups()[0]
for i in range(1, count + 1):
text = desc
new_summary = summary
text += "\n\nA more detailed description of this task is available at the MoinMoin wiki: %s" % (self.url + quote(page.encode("utf-8")), )
if count > 1:
text += "\n\nThis task is available multiple times. This one is %i of %i." % (i, count)
new_summary += " %i/%i" % (i, count)
yield Task(new_summary, text, "Translation")
def pull_and_gencsv():
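    # Gather all tasks from the wiki and write them as CSV rows on stdout,
    # logging progress to stderr.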
print >> sys.stderr, "Collecting tasks ..."
tasks = Collector("http://moinmo.in/").collect_tasks()
print >> sys.stderr, "Importing %i tasks ..." % (len(tasks), )
print >> sys.stderr, "\n".join(repr(task) for task in tasks)
summary_prefix = '' # "[TEST] " # EMPTY FOR PRODUCTION IMPORT!
tmin, tmax = 0, None
csvwriter = csv.writer(sys.stdout, delimiter=",", doublequote=True)
for task in tasks[tmin:tmax]:
csvwriter.writerow([summary_prefix + task.summary, task.desc, task.hours, task.mentors, task.difficulty, task.types, task.label])
if __name__ == "__main__":
pull_and_gencsv()
|
mit
| 5,125,018,128,756,069,000
| 35.788945
| 235
| 0.557181
| false
| 3.796063
| false
| false
| false
|
dims/heat
|
heat/tests/test_translation_rule.py
|
1
|
26801
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.hot import functions as hot_funcs
from heat.engine import parameters
from heat.engine import properties
from heat.engine import translation
from heat.tests import common
class TestTranslationRule(common.HeatTestCase):
def test_translation_rule(self):
for r in translation.TranslationRule.RULE_KEYS:
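            # Exercise every rule type: Add requires a list value, Replace takes a
            # value_name, and Resolve needs a client_plugin and finder.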
props = properties.Properties({}, {})
rule = translation.TranslationRule(
props,
r,
['any'],
['value'] if r == 'Add' else 'value',
'value_name' if r == 'Replace' else None,
'client_plugin' if r == 'Resolve' else None,
'finder' if r == 'Resolve' else None)
self.assertEqual(rule.properties, props)
self.assertEqual(rule.rule, r)
if r == 'Add':
self.assertEqual(['value'], rule.value)
else:
self.assertEqual('value', rule.value)
if r == 'Replace':
self.assertEqual('value_name', rule.value_name)
else:
self.assertIsNone(rule.value_name)
def test_invalid_translation_rule(self):
props = properties.Properties({}, {})
exc = self.assertRaises(ValueError,
translation.TranslationRule,
'proppy', mock.ANY,
mock.ANY)
self.assertEqual('Properties must be Properties type. '
'Found %s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
'EatTheCookie',
mock.ANY,
mock.ANY)
self.assertEqual('There is no rule EatTheCookie. List of allowed '
'rules is: Add, Replace, Delete, Resolve.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
'networks.network',
'value')
self.assertEqual('source_path should be a list with path instead of '
'%s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
[],
mock.ANY)
self.assertEqual('source_path must be non-empty list with path.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
mock.ANY,
'value_name')
self.assertEqual('Use value_name only for replacing list elements.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
'value')
self.assertEqual('value must be list type when rule is Add.',
six.text_type(exc))
def test_add_rule_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [
{'red': 'blue'}
],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertIn({'red': 'dak'}, props.get('far'))
def test_add_rule_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertEqual([{'red': 'dak'}], props.get('far'))
def test_add_rule_invalid(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': 'tran',
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[props.get('bar')])
exc = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Add rule must be used only for lists.',
six.text_type(exc))
def test_replace_rule_map_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': {'red': 'tran'},
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_map_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_list_different(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual([{'red': 'dak'}, {'red': 'dak'}], props.get('far'))
def test_replace_rule_list_same(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
),
'blue': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'blue': 'white'},
{'red': 'roses'}]
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
None,
'blue')
rule.execute_rule()
self.assertEqual([{'red': 'white', 'blue': None},
{'blue': None, 'red': 'roses'}],
props.get('far'))
def test_replace_rule_str(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertEqual('one', props.get('far'))
def test_replace_rule_str_value_path_error(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
ex = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Cannot use bar and far at the same time.',
six.text_type(ex))
def test_replace_rule_str_value_path(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertIsNone(props.get('far'))
def test_replace_rule_str_invalid(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.INTEGER)
}
data = {'far': 'one', 'bar': 2}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
exc = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual("Property error: bar: Value 'one' is not an integer",
six.text_type(exc))
def test_delete_rule_list(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far', 'red'])
rule.execute_rule()
self.assertEqual([{'red': None}, {'red': None}], props.get('far'))
def test_delete_rule_other(self):
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far'])
rule.execute_rule()
self.assertIsNone(props.get('far'))
def _test_resolve_rule(self, is_list=False):
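        # Shared fixture: the fake client plugin's finder maps entity 'rose' to
        # 'pink' and everything else to 'yellow'; the schema is a list of maps
        # when is_list is set, otherwise a plain string property.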
class FakeClientPlugin(object):
def find_name_id(self, entity=None,
src_value='far'):
if entity == 'rose':
return 'pink'
return 'yellow'
if is_list:
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
else:
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
return FakeClientPlugin(), schema
def test_resolve_rule_list_populated(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_function(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {
'far': [{'red': 'blue'},
{'red': join_func}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_ref(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {
'far': [{'red': ref}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_list_empty(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([], props.get('far'))
def test_resolve_rule_other(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual('yellow', props.get('far'))
def test_resolve_rule_other_with_ref(self):
client_plugin, schema = self._test_resolve_rule()
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {'far': ref}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_function(self):
client_plugin, schema = self._test_resolve_rule()
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {'far': join_func}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_entity(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id',
entity='rose')
rule.execute_rule()
self.assertEqual('pink', props.get('far'))
def test_property_json_param_correct_translation(self):
"""Test case when property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
})
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(json_far='json_far'),
'get_param',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': param}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_path=['far', 'dar'])
rule.execute_rule()
self.assertEqual('rad', props.get('far').get('bar'))
def test_property_json_param_to_list_correct_translation(self):
"""Test case when list property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
}
))
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(json_far='json_far'),
'get_param',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': [param]}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_name='dar')
rule.execute_rule()
self.assertEqual([{'dar': None, 'bar': 'rad'}], props.get('far'))
def test_property_commadelimitedlist_param_correct_translation(self):
"""Test when property with sub-schema takes comma_delimited_list."""
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.STRING,
)
),
'boo': properties.Schema(
properties.Schema.STRING
)}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(list_far='list_far'),
'get_param',
'list_far')
param.parameters = {
'list_far': parameters.CommaDelimitedListParam(
'list_far',
{'Type': 'CommaDelimitedList'},
"white,roses").value()}
data = {'far': param, 'boo': 'chrysanthemums'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[props.get('boo')])
rule.execute_rule()
self.assertEqual(['white', 'roses', 'chrysanthemums'],
props.get('far'))
def test_property_no_translation_removed_function(self):
"""Test case when list property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
}
))
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.Removed(DummyStack(json_far='json_far'),
'Ref',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': [param]}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'bar'],
value_name='dar')
rule.execute_rule()
self.assertEqual([param], props.data.get('far'))
|
apache-2.0
| 5,504,096,451,595,284,000
| 32.543179
| 78
| 0.461065
| false
| 5.024559
| true
| false
| false
|
kpreid/shinysdr
|
setup.py
|
1
|
4716
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013, 2014, 2015, 2016, 2019 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
import os.path
import subprocess
import urllib
from setuptools import find_packages, setup, Command
from setuptools.command.build_py import build_py
ASSETS = {
'http://requirejs.org/docs/release/2.1.22/comments/require.js': 'shinysdr/deps/require.js',
'https://raw.githubusercontent.com/requirejs/text/646db27aaf2236cea92ac4107f32cbe5ae7a8d3a/text.js': 'shinysdr/deps/text.js'
}
class DownloadAssets(Command):
description = 'Download web app assets from external sites.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for source_url, destination_path in ASSETS.items():
if os.path.exists(destination_path):
print('skipping downloading {}, already exists'.format(destination_path))
else:
print('downloading {} to {}'.format(source_url, destination_path))
urllib.urlretrieve(source_url, destination_path)
class InitGitSubModules(Command):
description = 'Initialize Git submodules for dependencies.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print('Initializing submodules...')
subprocess.call(['git', 'submodule', 'update', '--init'])
class FetchDeps(Command):
"""fetch dependencies command"""
description = 'gathers external dependencies from various sources'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command('git_init')
self.run_command('retrieve_assets')
class BuildPyCommand(build_py):
"""Customized build command to ensure deps are fetched before build."""
def run(self):
self.run_command('fetch_deps')
build_py.run(self)
setup(
name='ShinySDR',
# version='...', # No versioning is defined yet
description='Software-defined radio receiver application built on GNU Radio with a web-based UI and plugins.',
url='https://github.com/kpreid/shinysdr/',
author='Kevin Reid',
author_email='kpreid@switchb.org',
classifiers=[
# TODO: review/improve; this list was made by browsing <https://pypi.python.org/pypi?%3Aaction=list_classifiers>; can we add new items?
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Twisted',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
        'Operating System :: OS Independent', # will probably fail on non-POSIX due to lack of portability work, not fundamentally
'Topic :: Communications :: Ham Radio', # non-exclusively ham
],
license='GPLv3+',
packages=find_packages(),
include_package_data=True,
install_requires=[
# 'gnuradio', # Not PyPI
# 'osmosdr', # Not PyPI
'twisted',
'txws',
'ephem',
'six',
'pyserial', # undeclared dependency of twisted.internet.serialport
# Without the service_identity module, Twisted can perform only rudimentary TLS client hostname verification
'service_identity',
        'pyasn1>=0.4.1,<0.5.0', # required to pin pyasn1 support for pyasn1-modules
'pyasn1-modules', # required for service_identity
],
dependency_links=[],
# zip_safe: TODO: Investigate. I suspect unsafe due to serving web resources relative to __file__.
zip_safe=False,
entry_points={
'console_scripts': {
'shinysdr = shinysdr.main:main',
'shinysdr-import = shinysdr.db_import.tool:import_main'
}
},
cmdclass={
'git_init': InitGitSubModules,
'retrieve_assets': DownloadAssets,
'fetch_deps': FetchDeps,
'build_py': BuildPyCommand,
},
)
|
gpl-3.0
| 5,016,024,157,505,470,000
| 32.446809
| 143
| 0.658185
| false
| 3.840391
| false
| false
| false
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/twitch.py
|
1
|
20771
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
import json
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
ExtractorError,
float_or_none,
int_or_none,
orderedSet,
parse_duration,
parse_iso8601,
qualities,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urljoin,
)
class TwitchBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'https://usher.ttvnw.net'
_LOGIN_FORM_URL = 'https://www.twitch.tv/login'
_LOGIN_POST_URL = 'https://passport.twitch.tv/login'
_CLIENT_ID = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _call_api(self, path, item_id, *args, **kwargs):
headers = kwargs.get('headers', {}).copy()
headers['Client-ID'] = self._CLIENT_ID
kwargs['headers'] = headers
response = self._download_json(
'%s/%s' % (self._API_BASE, path), item_id,
*args, **compat_kwargs(kwargs))
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
def fail(message):
raise ExtractorError(
'Unable to login. Twitch said: %s' % message, expected=True)
def login_step(page, urlh, note, data):
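            # Merge the page's hidden form inputs with our data, POST the result as
            # JSON to the login endpoint, and follow any redirect Twitch returns.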
form = self._hidden_inputs(page)
form.update(data)
page_url = urlh.geturl()
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page,
'post url', default=self._LOGIN_POST_URL, group='url')
post_url = urljoin(page_url, post_url)
headers = {
'Referer': page_url,
'Origin': page_url,
'Content-Type': 'text/plain;charset=UTF-8',
}
response = self._download_json(
post_url, None, note, data=json.dumps(form).encode(),
headers=headers, expected_status=400)
error = response.get('error_description') or response.get('error_code')
if error:
fail(error)
if 'Authenticated successfully' in response.get('message', ''):
return None, None
redirect_url = urljoin(
post_url,
response.get('redirect') or response['redirect_path'])
return self._download_webpage_handle(
redirect_url, None, 'Downloading login redirect page',
headers=headers)
login_page, handle = self._download_webpage_handle(
self._LOGIN_FORM_URL, None, 'Downloading login page')
# Some TOR nodes and public proxies are blocked completely
if 'blacklist_message' in login_page:
fail(clean_html(login_page))
redirect_page, handle = login_step(
login_page, handle, 'Logging in', {
'username': username,
'password': password,
'client_id': self._CLIENT_ID,
})
# Successful login
if not redirect_page:
return
if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None:
# TODO: Add mechanism to request an SMS or phone call
tfa_token = self._get_tfa_info('two-factor authentication token')
login_step(redirect_page, handle, 'Submitting TFA token', {
'authy_token': tfa_token,
'remember_2fa': 'true',
})
def _prefer_source(self, formats):
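        # Give the original-quality 'Source' stream the highest preference (when
        # present) before sorting the formats.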
try:
source = next(f for f in formats if f['format_id'] == 'Source')
source['preference'] = 10
except StopIteration:
pass # No Source stream present
self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
def _download_info(self, item, item_id):
return self._extract_info(self._call_api(
'kraken/videos/%s%s' % (item, item_id), item_id,
'Downloading %s info JSON' % self._ITEM_TYPE))
def _extract_media(self, item_id):
info = self._download_info(self._ITEM_SHORTCUT, item_id)
response = self._call_api(
'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
'Downloading %s playlist JSON' % self._ITEM_TYPE)
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
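            # 'chunks' maps each quality name to its list of fragments; zipping the
            # lists groups the Nth fragment of every quality into one playlist entry
            # with one format per quality.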
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
status = info.get('status')
if status == 'recording':
is_live = True
elif status == 'recorded':
is_live = False
else:
is_live = None
return {
'id': info['_id'],
'title': info.get('title') or 'Untitled Broadcast',
'description': info.get('description'),
'duration': int_or_none(info.get('length')),
'thumbnail': info.get('preview'),
'uploader': info.get('channel', {}).get('display_name'),
'uploader_id': info.get('channel', {}).get('name'),
'timestamp': parse_iso8601(info.get('recorded_at')),
'view_count': int_or_none(info.get('views')),
'is_live': is_live,
}
def _real_extract(self, url):
return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
IE_NAME = 'twitch:video'
_VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'video'
_ITEM_SHORTCUT = 'a'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
'skip': 'HTTP Error 404: Not Found',
}
class TwitchChapterIE(TwitchItemBaseIE):
IE_NAME = 'twitch:chapter'
_VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'chapter'
_ITEM_SHORTCUT = 'c'
_TESTS = [{
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
'only_matching': True,
}]
class TwitchVodIE(TwitchItemBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|
player\.twitch\.tv/\?.*?\bvideo=v
)
(?P<id>\d+)
'''
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
_TESTS = [{
'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
'title': 'LCK Summer Split - Week 6 Day 1',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 17208,
'timestamp': 1435131709,
'upload_date': '20150624',
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
'start_time': 310,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# Untitled broadcast (title is None)
'url': 'http://www.twitch.tv/belkao_o/v/11230755',
'info_dict': {
'id': 'v11230755',
'ext': 'mp4',
'title': 'Untitled Broadcast',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1638,
'timestamp': 1439746708,
'upload_date': '20150816',
'uploader': 'BelkAO_o',
'uploader_id': 'belkao_o',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/videos/6528877',
'only_matching': True,
}, {
'url': 'https://m.twitch.tv/beagsandjam/v/247478721',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/northernlion/video/291940395',
'only_matching': True,
}]
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._call_api(
'api/vods/%s/access_token' % item_id, item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?%s' % (
self._USHER_BASE, item_id,
compat_urllib_parse_urlencode({
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'player': 'twitchweb',
'nauth': access_token['token'],
'nauthsig': access_token['sig'],
})),
item_id, 'mp4', entry_protocol='m3u8_native')
self._prefer_source(formats)
info['formats'] = formats
parsed_url = compat_urllib_parse_urlparse(url)
query = compat_parse_qs(parsed_url.query)
if 't' in query:
info['start_time'] = parse_duration(query['t'][0])
if info.get('timestamp') is not None:
info['subtitles'] = {
'rechat': [{
'url': update_url_query(
'https://rechat.twitch.tv/rechat-messages', {
'video_id': 'v%s' % item_id,
'start': info['timestamp'],
}),
'ext': 'json',
}],
}
return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
_PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
_PAGE_LIMIT = 100
def _extract_playlist(self, channel_id):
info = self._call_api(
'kraken/channels/%s' % channel_id,
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
broken_paging_detected = False
counter_override = None
for counter in itertools.count(1):
response = self._call_api(
self._PLAYLIST_PATH % (channel_id, offset, limit),
channel_id,
'Downloading %s JSON page %s'
% (self._PLAYLIST_TYPE, counter_override or counter))
page_entries = self._extract_playlist_page(response)
if not page_entries:
break
total = int_or_none(response.get('_total'))
# Since the beginning of March 2016 twitch's paging mechanism
# is completely broken on the twitch side. It simply ignores
# a limit and returns the whole offset number of videos.
# Working around by just requesting all videos at once.
# Upd: pagination bug was fixed by twitch on 15.03.2016.
if not broken_paging_detected and total and len(page_entries) > limit:
self.report_warning(
'Twitch pagination is broken on twitch side, requesting all videos at once',
channel_id)
broken_paging_detected = True
offset = total
counter_override = '(all at once)'
continue
entries.extend(page_entries)
if broken_paging_detected or total and len(page_entries) >= total:
break
offset += limit
return self.playlist_result(
[self._make_url_result(entry) for entry in orderedSet(entries)],
channel_id, channel_name)
def _make_url_result(self, url):
try:
video_id = 'v%s' % TwitchVodIE._match_id(url)
return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
except AssertionError:
return self.url_result(url)
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:profile'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_TYPE = 'profile'
_TESTS = [{
'url': 'http://www.twitch.tv/vanillatv/profile',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}, {
'url': 'http://m.twitch.tv/vanillatv/profile',
'only_matching': True,
}]
class TwitchVideosBaseIE(TwitchPlaylistBaseIE):
_VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type='
class TwitchAllVideosIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:all'
_VALID_URL = r'%s/all' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight'
_PLAYLIST_TYPE = 'all videos'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/all',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 869,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/all',
'only_matching': True,
}]
class TwitchUploadsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:uploads'
_VALID_URL = r'%s/uploads' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload'
_PLAYLIST_TYPE = 'uploads'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/uploads',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/uploads',
'only_matching': True,
}]
class TwitchPastBroadcastsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:past-broadcasts'
_VALID_URL = r'%s/past-broadcasts' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive'
_PLAYLIST_TYPE = 'past broadcasts'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/past-broadcasts',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 0,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/past-broadcasts',
'only_matching': True,
}]
class TwitchHighlightsIE(TwitchVideosBaseIE):
IE_NAME = 'twitch:videos:highlights'
_VALID_URL = r'%s/highlights' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
_PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight'
_PLAYLIST_TYPE = 'highlights'
_TESTS = [{
'url': 'https://www.twitch.tv/spamfish/videos/highlights',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 805,
}, {
'url': 'https://m.twitch.tv/spamfish/videos/highlights',
'only_matching': True,
}]
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?:www|go|m)\.)?twitch\.tv/|
player\.twitch\.tv/\?.*?\bchannel=
)
(?P<id>[^/#?]+)
'''
_TESTS = [{
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
'display_id': 'shroomztv',
'ext': 'mp4',
'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
'is_live': True,
'timestamp': 1421928037,
'upload_date': '20150122',
'uploader': 'ShroomzTV',
'uploader_id': 'shroomztv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.twitch.tv/miracle_doto#profile-0',
'only_matching': True,
}, {
'url': 'https://player.twitch.tv/?channel=lotsofs',
'only_matching': True,
}, {
'url': 'https://go.twitch.tv/food',
'only_matching': True,
}, {
'url': 'https://m.twitch.tv/food',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
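        # Let the more specific extractors (VODs, chapters, profiles, video lists,
        # clips) claim the URL first; only handle it here if none of them match.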
return (False
if any(ie.suitable(url) for ie in (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchAllVideosIE,
TwitchUploadsIE,
TwitchPastBroadcastsIE,
TwitchHighlightsIE,
TwitchClipsIE))
else super(TwitchStreamIE, cls).suitable(url))
def _real_extract(self, url):
channel_id = self._match_id(url)
stream = self._call_api(
'kraken/streams/%s?stream_type=all' % channel_id, channel_id,
'Downloading stream JSON').get('stream')
if not stream:
raise ExtractorError('%s is offline' % channel_id, expected=True)
        # The channel name may be typed with different capitalization than the
        # original channel name (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON),
        # which would lead to constructing an invalid m3u8 URL. Work around this by
        # using the original channel name from the stream JSON, falling back to
        # lowercase if it's not available.
channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
access_token = self._call_api(
'api/channels/%s/access_token' % channel_id, channel_id,
'Downloading channel access token')
query = {
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
'token': access_token['token'].encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
% (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
view_count = stream.get('viewers')
timestamp = parse_iso8601(stream.get('created_at'))
channel = stream['channel']
title = self._live_title(channel.get('display_name') or channel.get('name'))
description = channel.get('status')
thumbnails = []
for thumbnail_key, thumbnail_url in stream['preview'].items():
m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
if not m:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return {
'id': compat_str(stream['_id']),
'display_id': channel_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'uploader': channel.get('display_name'),
'uploader_id': channel.get('name'),
'timestamp': timestamp,
'view_count': view_count,
'formats': formats,
'is_live': True,
}
class TwitchClipsIE(TwitchBaseIE):
IE_NAME = 'twitch:clips'
_VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:[^/]+/)*|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
'md5': '761769e1eafce0ffebfb4089cb3847cd',
'info_dict': {
'id': '42850523',
'ext': 'mp4',
'title': 'EA Play 2016 Live from the Novo Theatre',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1465767393,
'upload_date': '20160612',
'creator': 'EA',
'uploader': 'stereotype_',
'uploader_id': '43566419',
},
}, {
# multiple formats
'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
'only_matching': True,
}, {
'url': 'https://www.twitch.tv/sergeynixon/clip/StormyThankfulSproutFutureMan',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
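        # The clip status endpoint lists the downloadable quality options; the
        # kraken clips API further below supplies richer metadata when available.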
status = self._download_json(
'https://clips.twitch.tv/api/v2/clips/%s/status' % video_id,
video_id)
formats = []
for option in status['quality_options']:
if not isinstance(option, dict):
continue
source = url_or_none(option.get('source'))
if not source:
continue
formats.append({
'url': source,
'format_id': option.get('quality'),
'height': int_or_none(option.get('quality')),
'fps': int_or_none(option.get('frame_rate')),
})
self._sort_formats(formats)
info = {
'formats': formats,
}
clip = self._call_api(
'kraken/clips/%s' % video_id, video_id, fatal=False, headers={
'Accept': 'application/vnd.twitchtv.v5+json',
})
if clip:
quality_key = qualities(('tiny', 'small', 'medium'))
thumbnails = []
thumbnails_dict = clip.get('thumbnails')
if isinstance(thumbnails_dict, dict):
for thumbnail_id, thumbnail_url in thumbnails_dict.items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail_url,
'preference': quality_key(thumbnail_id),
})
info.update({
'id': clip.get('tracking_id') or video_id,
'title': clip.get('title') or video_id,
'duration': float_or_none(clip.get('duration')),
'views': int_or_none(clip.get('views')),
'timestamp': unified_timestamp(clip.get('created_at')),
'thumbnails': thumbnails,
'creator': try_get(clip, lambda x: x['broadcaster']['display_name'], compat_str),
'uploader': try_get(clip, lambda x: x['curator']['display_name'], compat_str),
'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str),
})
else:
info.update({
'title': video_id,
'id': video_id,
})
return info
|
gpl-3.0
| -2,672,165,837,656,433,700
| 27.610193
| 108
| 0.628135
| false
| 2.771684
| false
| false
| false
|
ronen/Halide
|
python_bindings/tutorial/lesson_12_using_the_gpu.py
|
1
|
11813
|
#!/usr/bin/python3
# Halide tutorial lesson 12.
# This lesson demonstrates how to use Halide to run code on a GPU.
# This lesson can be built by invoking the command:
# make tutorial_lesson_12_using_the_gpu
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -lpthread -ldl -o lesson_12
# LD_LIBRARY_PATH=../bin ./lesson_12
# On os x:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -o lesson_12
# DYLD_LIBRARY_PATH=../bin ./lesson_12
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
# Include some support code for loading pngs.
#include "image_io.h"
from scipy.misc import imread
import os.path
# Include a clock to do performance testing.
#include "clock.h"
from datetime import datetime
# Define some Vars to use.
x, y, c, i = Var("x"), Var("y"), Var("c"), Var("i")
# We're going to want to schedule a pipeline in several ways, so we
# define the pipeline in a class so that we can recreate it several
# times with different schedules.
class MyPipeline:
def __init__(self, input):
assert type(input) == Buffer_uint8
self.lut = Func("lut")
self.padded = Func("padded")
self.padded16 = Func("padded16")
self.sharpen = Func("sharpen")
self.curved = Func("curved")
self.input = input
# For this lesson, we'll use a two-stage pipeline that sharpens
# and then applies a look-up-table (LUT).
# First we'll define the LUT. It will be a gamma curve.
self.lut[i] = cast(UInt(8), clamp(pow(i / 255.0, 1.2) * 255.0, 0, 255))
# Augment the input with a boundary condition.
self.padded[x, y, c] = input[clamp(x, 0, input.width()-1),
clamp(y, 0, input.height()-1), c]
# Cast it to 16-bit to do the math.
self.padded16[x, y, c] = cast(UInt(16), self.padded[x, y, c])
# Next we sharpen it with a five-tap filter.
self.sharpen[x, y, c] = (self.padded16[x, y, c] * 2-
(self.padded16[x - 1, y, c] +
self.padded16[x, y - 1, c] +
self.padded16[x + 1, y, c] +
self.padded16[x, y + 1, c]) / 4)
# Then apply the LUT.
self.curved[x, y, c] = self.lut[self.sharpen[x, y, c]]
# Now we define methods that give our pipeline several different
# schedules.
def schedule_for_cpu(self):
# Compute the look-up-table ahead of time.
self.lut.compute_root()
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Look-up-tables don't vectorize well, so just parallelize
# curved in slices of 16 scanlines.
yo, yi = Var("yo"), Var("yi")
self.curved.split(y, yo, yi, 16) \
.parallel(yo)
# Compute sharpen as needed per scanline of curved, reusing
# previous values computed within the same strip of 16
# scanlines.
self.sharpen.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Vectorize the sharpen. It's 16-bit so we'll vectorize it 8-wide.
self.sharpen.vectorize(x, 8)
# Compute the padded input at the same granularity as the
# sharpen. We'll leave the cast to 16-bit inlined into
# sharpen.
self.padded.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Also vectorize the padding. It's 8-bit, so we'll vectorize
# 16-wide.
self.padded.vectorize(x, 16)
# JIT-compile the pipeline for the CPU.
self.curved.compile_jit()
return
# Now a schedule that uses CUDA or OpenCL.
def schedule_for_gpu(self):
# We make the decision about whether to use the GPU for each
# Func independently. If you have one Func computed on the
# CPU, and the next computed on the GPU, Halide will do the
# copy-to-gpu under the hood. For this pipeline, there's no
# reason to use the CPU for any of the stages. Halide will
# copy the input image to the GPU the first time we run the
# pipeline, and leave it there to reuse on subsequent runs.
# As before, we'll compute the LUT once at the start of the
# pipeline.
self.lut.compute_root()
# Let's compute the look-up-table using the GPU in 16-wide
# one-dimensional thread blocks. First we split the index
# into blocks of size 16:
block, thread = Var("block"), Var("thread")
self.lut.split(i, block, thread, 16)
# Then we tell cuda that our Vars 'block' and 'thread'
# correspond to CUDA's notions of blocks and threads, or
# OpenCL's notions of thread groups and threads.
self.lut.gpu_blocks(block) \
.gpu_threads(thread)
# This is a very common scheduling pattern on the GPU, so
# there's a shorthand for it:
# lut.gpu_tile(i, 16)
# Func::gpu_tile method is similar to Func::tile, except that
# it also specifies that the tile coordinates correspond to
# GPU blocks, and the coordinates within each tile correspond
# to GPU threads.
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Compute curved in 2D 8x8 tiles using the GPU.
self.curved.gpu_tile(x, y, 8, 8)
# This is equivalent to:
# curved.tile(x, y, xo, yo, xi, yi, 8, 8)
# .gpu_blocks(xo, yo)
# .gpu_threads(xi, yi)
# We'll leave sharpen as inlined into curved.
# Compute the padded input as needed per GPU block, storing the
# intermediate result in shared memory. Var::gpu_blocks, and
# Var::gpu_threads exist to help you schedule producers within
# GPU threads and blocks.
self.padded.compute_at(self.curved, Var.gpu_blocks())
# Use the GPU threads for the x and y coordinates of the
# padded input.
self.padded.gpu_threads(x, y)
# JIT-compile the pipeline for the GPU. CUDA or OpenCL are
# not enabled by default. We have to construct a Target
# object, enable one of them, and then pass that target
# object to compile_jit. Otherwise your CPU will very slowly
# pretend it's a GPU, and use one thread per output pixel.
# Start with a target suitable for the machine you're running
# this on.
target = get_host_target()
# Then enable OpenCL or CUDA.
#use_opencl = False
use_opencl = True
if use_opencl:
# We'll enable OpenCL here, because it tends to give better
# performance than CUDA, even with NVidia's drivers, because
# NVidia's open source LLVM backend doesn't seem to do all
# the same optimizations their proprietary compiler does.
target.set_feature(TargetFeature.OpenCL)
print("(Using OpenCL)")
else:
# Uncomment the next line and comment out the line above to
# try CUDA instead.
target.set_feature(TargetFeature.CUDA)
print("(Using CUDA)")
# If you want to see all of the OpenCL or CUDA API calls done
# by the pipeline, you can also enable the Debug
# flag. This is helpful for figuring out which stages are
# slow, or when CPU -> GPU copies happen. It hurts
# performance though, so we'll leave it commented out.
# target.set_feature(TargetFeature.Debug)
self.curved.compile_jit(target)
def test_performance(self):
# Test the performance of the scheduled MyPipeline.
output = Buffer(UInt(8),
self.input.width(),
self.input.height(),
self.input.channels())
# Run the filter once to initialize any GPU runtime state.
self.curved.realize(output)
# Now take the best of 3 runs for timing.
best_time = float("inf")
for i in range(3):
t1 = datetime.now()
# Run the filter 100 times.
for j in range(100):
self.curved.realize(output)
# Force any GPU code to finish by copying the buffer back to the CPU.
output.copy_to_host()
t2 = datetime.now()
elapsed = (t2 - t1).total_seconds()
if elapsed < best_time:
best_time = elapsed
# end of "best of three times"
print("%1.4f milliseconds" % (best_time * 1000))
def test_correctness(self, reference_output):
assert type(reference_output) == Buffer_uint8
output = self.curved.realize(self.input.width(),
self.input.height(),
self.input.channels())
assert type(output) == Buffer_uint8
# Check against the reference output.
for c in range(self.input.channels()):
for y in range(self.input.height()):
for x in range(self.input.width()):
if output(x, y, c) != reference_output(x, y, c):
print(
"Mismatch between output (%d) and "
"reference output (%d) at %d, %d, %d" % (
output(x, y, c),
reference_output(x, y, c),
x, y, c))
return
print("CPU and GPU outputs are consistent.")
def main():
# Load an input image.
image_path = os.path.join(os.path.dirname(__file__), "../../tutorial/images/rgb.png")
input_data = imread(image_path)
input = Buffer(input_data)
# Allocated an image that will store the correct output
reference_output = Buffer(UInt(8), input.width(), input.height(), input.channels())
print("Testing performance on CPU:")
p1 = MyPipeline(input)
p1.schedule_for_cpu()
p1.test_performance()
p1.curved.realize(reference_output)
if have_opencl():
print("Testing performance on GPU:")
p2 = MyPipeline(input)
p2.schedule_for_gpu()
p2.test_performance()
p2.test_correctness(reference_output)
else:
print("Not testing performance on GPU, "
"because I can't find the opencl library")
return 0
def have_opencl():
"""
A helper function to check if OpenCL seems to exist on this machine.
:return: bool
"""
import ctypes
import platform
try:
if platform.system() == "Windows":
ret = ctypes.windll.LoadLibrary("OpenCL.dll") != None
elif platform.system() == "Darwin": # apple
ret = ctypes.cdll.LoadLibrary("/System/Library/Frameworks/OpenCL.framework/Versions/Current/OpenCL") != None
elif platform.system() == "Linux":
ret = ctypes.cdll.LoadLibrary("libOpenCL.so") != None
else:
raise Exception("Cannot check for opencl presence "
"on unknown system '%s'" % platform.system())
except OSError:
ret = False
return ret
if __name__ == "__main__":
main()
|
mit
| 7,020,487,303,896,136,000
| 34.79697
| 130
| 0.584018
| false
| 3.794732
| true
| false
| false
|
pathscale/ninja
|
platform_helper.py
|
1
|
2126
|
#!/usr/bin/env python
# Copyright 2011 Google Inc.
# Copyright 2013 Patrick von Reth <vonreth@kde.org>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def platforms():
return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
'mingw', 'msvc']
class Platform( object ):
def __init__( self, platform):
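        # Keep an explicitly requested platform name; otherwise derive a
        # normalized name from sys.platform below.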
self._platform = platform
if not self._platform is None:
return
self._platform = sys.platform
if self._platform.startswith('linux'):
self._platform = 'linux'
elif self._platform.startswith('freebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('openbsd'):
self._platform = 'openbsd'
elif self._platform.startswith('solaris'):
self._platform = 'solaris'
elif self._platform.startswith('mingw'):
self._platform = 'mingw'
elif self._platform.startswith('win'):
self._platform = 'msvc'
def platform(self):
return self._platform
def is_linux(self):
return self._platform == 'linux'
def is_mingw(self):
return self._platform == 'mingw'
def is_msvc(self):
return self._platform == 'msvc'
def is_windows(self):
return self.is_mingw() or self.is_msvc()
def is_solaris(self):
return self._platform == 'solaris'
def is_freebsd(self):
return self._platform == 'freebsd'
def is_openbsd(self):
return self._platform == 'openbsd'
def is_sunos5(self):
return self._platform == 'sunos5'
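# A hedged usage sketch, not part of the original helper module: Platform(None)
# autodetects the host from sys.platform, while passing one of the names from
# platforms() forces a specific platform.
if __name__ == '__main__':
    host = Platform(None)
    print(host.platform())
    print('windows: %r' % host.is_windows())
    print('linux:   %r' % host.is_linux())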
|
apache-2.0
| 9,217,081,902,934,273,000
| 29.811594
| 74
| 0.629351
| false
| 3.988743
| false
| false
| false
|
OpenHydrology/OH-Auto-Statistical-REST-API
|
application.py
|
1
|
2633
|
import flask
import flask_restful
import flask.ext.cors
from celery import Celery
from resources.analysis import AnalysisRes, AnalysisStatusRes
from resources.catchment import CatchmentListRes, CatchmentRes
from resources.dataimport import DataImportRes
import floodestimation
import floodestimation.loaders
import floodestimation.fehdata
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
from sqlalchemy.orm import sessionmaker
class Application(object):
def __init__(self, settings):
self.flask_app = flask.Flask(__name__)
self.flask_app.config.from_object(settings)
flask.ext.cors.CORS(self.flask_app, resources=r'/api/*', allow_headers=['Content-Type', 'Authorization'],
expose_headers=['Location'])
self.rest_api = flask_restful.Api(self.flask_app)
self.db = floodestimation.db
self.db.engine = create_engine(self.flask_app.config['DATABASE_URL'])
self.db.metadata = MetaData(bind=self.db.engine, reflect=True)
self.db.Session = sessionmaker(bind=self.db.engine)
self._set_db_session()
self._set_routes()
def _set_routes(self):
self.rest_api.add_resource(AnalysisRes, '/api/v0/analyses/', endpoint='post_analysis')
self.rest_api.add_resource(AnalysisRes, '/api/v0/analyses/<task_id>', endpoint='get_analysis')
self.rest_api.add_resource(AnalysisStatusRes, '/api/v0/analysis-tasks/<task_id>', endpoint='analysis_status')
self.rest_api.add_resource(CatchmentListRes, '/api/v0/catchments/')
self.rest_api.add_resource(CatchmentRes, '/api/v0/catchments/<int:catchment_id>')
self.rest_api.add_resource(DataImportRes, '/api/v0/data-imports/')
def _set_db_session(self):
@self.flask_app.before_request
def before_request():
flask.g.db_session = self.db.Session()
@self.flask_app.teardown_request
def teardown_request(exception):
db_session = getattr(flask.g, 'db_session', None)
if db_session is not None:
db_session.close()
def celery(self):
app = self.flask_app
celery = Celery(app.import_name)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
def start_app(self):
self.flask_app.run()
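# Hedged usage sketch, not part of the original module. `Settings` below is a
# hypothetical configuration object exposing at least DATABASE_URL plus the
# usual Flask/Celery options; celery() returns a Celery app whose tasks always
# run inside the Flask application context thanks to the ContextTask subclass.
#
#   from settings import Settings            # hypothetical settings module
#   application = Application(Settings)
#   celery_app = application.celery()        # imported by the celery worker
#   if __name__ == '__main__':
#       application.start_app()              # run the Flask development server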
|
gpl-3.0
| -304,302,401,893,663,360
| 35.569444
| 117
| 0.651348
| false
| 3.708451
| false
| false
| false
|
eduardoklosowski/ergo-notes
|
ergonotes/admin.py
|
1
|
1098
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Eduardo Augusto Klosowski
#
# This file is part of Ergo Notes.
#
# Ergo Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ergo Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Ergo Notes. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from . import models
@admin.register(models.Note)
class NoteAdmin(admin.ModelAdmin):
list_display = ('user', 'priority', 'title', 'show_on_home', 'create_on', 'modify_on', 'markup')
list_display_links = ('title',)
list_filter = ('priority', 'markup')
search_fields = ('=user', 'title')
|
agpl-3.0
| -931,401,604,839,511,800
| 34.419355
| 100
| 0.720401
| false
| 3.635762
| false
| false
| false
|
zork9/pygame-pyMM
|
maproomdungeon.py
|
1
|
4377
|
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
from pygame.locals import *
from time import *
from maproom import *
from wall import *
class MaproomDungeon(MaproomBase):
"Room with a (big) map"
def __init__(self,x,y):
MaproomBase.__init__(self,x,y)
self.northwalls = []
self.southwalls = []
self.westwalls = []
        self.eastwalls = []
self.gameobjects = []
self.tileboxes = []
self.pits = []
self.ropes = []
self.ladders = []
self.bullets = []
def addnorthwall(self, x,y,w,h,imagefilename):
self.northwalls.append(Wall(x,y,w,h,imagefilename))
def addsouthwall(self, x,y,w,h,imagefilename):
self.southwalls.append(Wall(x,y,w,h,imagefilename))
def addwestwall(self, x,y,w,h,imagefilename):
self.westwalls.append(Wall(x,y,w,h,imagefilename))
def addeastwall(self, x,y,w,h,imagefilename):
self.eastwalls.append(Wall(x,y,w,h,imagefilename))
def draw(self,screen):
##print "x=%d" % self.relativex
screen.blit(self.background, (0+self.relativex, 0+self.relativey))
for w in self.northwalls:
w.draw(screen,self)
for w in self.southwalls:
w.draw(screen,self)
for w in self.westwalls:
w.draw(screen,self)
for w in self.eastwalls:
w.draw(screen,self)
def collidewithladders(self, player):
for i in self.ladders:
if i != None and i.collidewithladder(self, player):
return 2
return 0
def collidewithladdersdown(self, player):
for i in self.ladders:
if i != None and i.collidewithladderdown(self, player):
return 2
return 0
# NOTE player can be enemy
def collide(self, player,hploss):
for i in self.gameobjects:
#print "go> %s" % i
if i != None and i.collide(self,player,hploss): ### NOTE hp loss of hploss
return 2 # 1 kills game
for i in self.northwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.southwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.westwalls:
if i != None and i.collide(self,player,hploss):
return 2
for i in self.eastwalls:
if i != None and i.collide(self,player,hploss):
return 2
# for i in self.tileboxes:
# if i != None and i.collide(self,player,hploss):
# #self.undomove()
# # FIXME self.undomove()
# return 2
# for i in self.pits:
# if i != None and i.collide(self,player,hploss):
# return 2
return 0
def collidewithenemy(self, enemy):
for t in self.tileboxes:
if t != None and t.collidewithenemy(self,enemy):
enemy.undomove()
return 2 # 1 kills game
return 0
def fall(self, player):
self.moveup()
for i in self.gameobjects:
if i != None and i.fallcollide(self, player):
self.movedown()
return 2 # 1 kills game
return 0
|
gpl-2.0
| 2,741,953,282,796,637,700
| 31.422222
| 79
| 0.664839
| false
| 3.343774
| false
| false
| false
|
gofed/gofed-ng
|
services/deps/service.py
|
1
|
4379
|
#!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import os
import shutil
import sys
from common.service.computationalService import ComputationalService
from common.service.serviceEnvelope import ServiceEnvelope
from common.service.action import action
from common.service.serviceResult import ServiceResult
from common.system.extractedRpmFile import ExtractedRpmFile
from common.system.extractedSrpmFile import ExtractedSrpmFile
from common.system.extractedTarballFile import ExtractedTarballFile
import gofedlib.gosymbolsextractor as gofedlib
class DepsService(ComputationalService):
''' Dependencies checks '''
def signal_process(self):
self.tmpfile_path = None
self.extracted1_path = None
self.extracted2_path = None
def signal_processed(self, was_error):
if self.tmpfile_path is not None:
os.remove(self.tmpfile_path)
if self.extracted1_path is not None:
shutil.rmtree(self.extracted1_path)
if self.extracted2_path is not None:
shutil.rmtree(self.extracted2_path)
@action
def deps_analysis(self, file_id, opts=None):
'''
Get deps of a file
@param file_id: file to be analysed
@param opts: additional analysis opts
@return: list of dependencies
'''
ret = ServiceResult()
default_opts = {'language': 'detect', 'tool': 'default'}
if opts is None:
opts = default_opts
else:
default_opts.update(opts)
opts = default_opts
self.tmpfile_path = self.get_tmp_filename()
with self.get_system() as system:
f = system.download(file_id, self.tmpfile_path)
self.extracted1_path = self.get_tmp_dirname()
d = f.unpack(self.extracted1_path)
if isinstance(d, ExtractedRpmFile):
src_path = d.get_content_path()
elif isinstance(d, ExtractedTarballFile):
src_path = d.get_path()
elif isinstance(d, ExtractedSrpmFile):
# we have to unpack tarball first
t = d.get_tarball()
self.extracted2_path = self.get_tmp_dirname()
            d = t.unpack(self.extracted2_path)
src_path = d.get_path()
else:
raise ValueError("Filetype %s cannot be processed" % (d.get_type(),))
# TODO: handle opts
try:
ret.result = gofedlib.project_packages(src_path)
except:
exc_info = sys.exc_info()
ret.meta['error'] = [ str(exc_info[0]), str(exc_info[1]), str(exc_info[2])]
finally:
ret.meta['language'] = 'golang'
ret.meta['tool'] = 'gofedlib'
return ret
@action
def deps_diff(self, deps1, deps2, opts=None):
'''
Make a diff of dependencies
@param deps1: the first dependency list
@param deps2: the second dependency list
@param opts: additional analysis opts
@return: list of dependency differences
'''
default_opts = {'language': 'detect', 'tool': 'default'}
ret = ServiceResult()
if opts is None:
opts = default_opts
else:
default_opts.update(opts)
opts = default_opts
# TODO: implement deps difference
raise NotImplementedError("Currently not implemented")
return ret
if __name__ == "__main__":
ServiceEnvelope.serve(DepsService)
|
gpl-3.0
| 6,155,984,285,590,203,000
| 33.480315
| 87
| 0.622745
| false
| 4.081081
| false
| false
| false
|
lcy-seso/models
|
fluid/ocr_recognition/_ce.py
|
1
|
1463
|
# this file is only used for continuous evaluation test!
import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi, DurationKpi, AccKpi
# NOTE kpi.py should shared in models in some way!!!!
train_cost_kpi = CostKpi('train_cost', 0.05, 0, actived=True)
test_acc_kpi = AccKpi('test_acc', 0.005, 0, actived=True)
train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=True)
train_acc_kpi = AccKpi('train_acc', 0.005, 0, actived=True)
tracking_kpis = [
train_acc_kpi,
train_cost_kpi,
test_acc_kpi,
train_duration_kpi,
]
def parse_log(log):
'''
This method should be implemented by model developers.
The suggestion:
each line in the log should be key, value, for example:
"
train_cost\t1.0
test_cost\t1.0
train_cost\t1.0
train_cost\t1.0
train_acc\t1.2
"
'''
for line in log.split('\n'):
fs = line.strip().split('\t')
print(fs)
if len(fs) == 3 and fs[0] == 'kpis':
kpi_name = fs[1]
kpi_value = float(fs[2])
yield kpi_name, kpi_value
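# Hedged illustration, not part of the original file: parse_log() only yields
# records from tab-separated lines of the form "kpis\t<name>\t<value>", e.g.
#   log = "kpis\ttrain_cost\t1.0\nkpis\ttrain_acc\t0.9\nepoch 1 done"
#   list(parse_log(log))  ->  [('train_cost', 1.0), ('train_acc', 0.9)]
# every line is also printed as a parsed field list while scanning.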
def log_to_ce(log):
kpi_tracker = {}
for kpi in tracking_kpis:
kpi_tracker[kpi.name] = kpi
for (kpi_name, kpi_value) in parse_log(log):
print(kpi_name, kpi_value)
kpi_tracker[kpi_name].add_record(kpi_value)
kpi_tracker[kpi_name].persist()
if __name__ == '__main__':
log = sys.stdin.read()
log_to_ce(log)
|
apache-2.0
| -8,328,208,520,477,808,000
| 22.983607
| 73
| 0.608339
| false
| 2.808061
| false
| false
| false
|
openenglishbible/USFM-Tools
|
transform/support/asciiRenderer.py
|
1
|
5744
|
# -*- coding: utf-8 -*-
#
import codecs
import io
import os
import textwrap
import abstractRenderer
#
# Simplest renderer. Ignores everything except ascii text.
#
class Renderer(abstractRenderer.AbstractRenderer):
def __init__(self, inputDir, outputDir, outputName, config):
self.identity = 'ascii renderer'
self.outputDescription = os.path.join(outputDir, outputName + '.txt')
abstractRenderer.AbstractRenderer.__init__(self, inputDir, outputDir, outputName, config)
# Unset
self.f = None # output file stream
# IO
self.inputDir = inputDir
self.outputFilename = os.path.join(outputDir, outputName + '.txt')
# Flags
self.d = False
self.narrower = False
self.inX = False
self.inND = False
def render(self):
self.f = io.StringIO()
self.loadUSFM(self.inputDir)
self.run()
v = self.f.getvalue()
self.f.close()
encoding=self.config.get('Plain Text','encoding')
if encoding == 'ascii':
self.logger.debug('Converting to ascii')
v = self.clean(v)
if self.config.get('Plain Text','wrapping'):
self.logger.debug('Wrapping')
v = self.wrap(v)
o = open(self.outputFilename, 'w', encoding=encoding)
o.write(v)
o.close()
self.logger.debug('Saved as ' + encoding)
# Support
def wrap(self, t):
nl = ''
for i in t.split('\n'):
nl = nl + textwrap.fill(i, width=80) + '\n'
return nl
def clean(self, text):
t = text.replace('‘', "'")
t = t.replace('’', "'")
t = t.replace('“', '"')
t = t.replace('”', '"')
t = t.replace('—', '--') # mdash
t = t.replace('\u2013', '--') # ndash
t = t.replace('\u2026', '...') # ellipsis
return t
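    # Hedged illustration, not part of the original renderer: clean() maps
    # typographic punctuation to plain ASCII, e.g.
    #   '“He said — wait…”'  ->  '"He said -- wait..."'
    # and wrap() re-wraps every line to 80 columns with textwrap.fill.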
def startNarrower(self, n):
s = '\n'
if not self.narrower: s = s + '\n'
self.narrower = True
return s + ' ' * n
def stopNarrower(self):
self.narrower = False
return ''
def startD(self):
self.d = True
return ''
def stopD(self):
self.d = False
return ''
def escape(self, text):
t = text
if self.inX:
return ''
t = t.upper() if self.inND else t
return t
def box(self, text):
t = (80 * '#') + '\n'
t = t + '#' + (78 * ' ') + '#\n'
t = t + '#' + text.center(78) + '#\n'
t = t + '#' + (78 * ' ') + '#\n'
t = t + (80 * '#') + '\n'
return t
def center(self, text):
return text.center(80)
# Tokens
def render_h(self, token): self.f.write('\n\n\n' + self.box(token.value) + '\n\n')
def render_mt1(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_mt2(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_mt3(self, token): self.f.write(self.center(token.value.upper()) + '\n')
def render_ms1(self, token): self.f.write('\n\n' + self.center('[' + token.value + ']') + '\n\n')
def render_ms2(self, token): self.f.write('\n\n' + self.center('[' + token.value + ']') + '\n\n')
def render_m(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n')
def render_p(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
# Ignore indenting
def render_pi(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
def render_b(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n ')
def render_s1(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n\n*' + token.value + '*\n ')
def render_s2(self, token): self.f.write(self.stopD() + self.stopNarrower() + '\n\n*' + token.value + '*\n ')
def render_c(self, token): self.f.write(' ' )
def render_v(self, token): self.f.write(' ' )
def render_text(self, token): self.f.write(self.escape(token.value))
def render_q(self, token): self.f.write(self.stopD() + self.startNarrower(1))
def render_q1(self, token): self.f.write(self.stopD() + self.startNarrower(1))
def render_q2(self, token): self.f.write(self.stopD() + self.startNarrower(2))
def render_q3(self, token): self.f.write(self.stopD() + self.startNarrower(3))
def render_nb(self, token): self.f.write(self.stopD() + self.stopNarrower() + "\n\n")
def render_li(self, token): self.f.write(' ')
def render_d(self, token): self.f.write(self.startD())
def render_sp(self, token): self.f.write(self.startD())
def render_pbr(self, token): self.f.write('\n')
def render_nd_s(self, token): self.inND = True
def render_nd_e(self, token): self.inND = False
# Ignore...
def render_x_s(self,token): self.inX = True
def render_x_e(self,token): self.inX = False
# Irrelevant
def render_pb(self,token): pass
def render_wj_s(self,token): pass
def render_wj_e(self,token): pass
def render_qs_s(self, token): pass
def render_qs_e(self, token): pass
def render_em_s(self, token): pass
def render_em_e(self, token): pass
def render_f_s(self,token): self.f.write('{ ')
def render_f_e(self,token): self.f.write(' }')
def render_fr(self, token): self.f.write('(' + self.escape(token.value) + ') ')
def render_ft(self, token): pass
def render_periph(self, token): pass
|
mit
| -775,057,629,401,138,200
| 34.8375
| 121
| 0.537147
| false
| 3.162714
| false
| false
| false
|
ophiry/dvc
|
dvc/logger.py
|
1
|
1532
|
import sys
import logging
import colorama
colorama.init()
class Logger(object):
DEFAULT_LEVEL = logging.INFO
LEVEL_MAP = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARNING,
'error': logging.ERROR
}
COLOR_MAP = {
'debug': colorama.Fore.BLUE,
'warn': colorama.Fore.YELLOW,
'error': colorama.Fore.RED
}
logging.basicConfig(stream=sys.stdout, format='%(message)s', level=DEFAULT_LEVEL)
_logger = logging.getLogger('dvc')
@staticmethod
def set_level(level):
Logger._logger.setLevel(Logger.LEVEL_MAP.get(level.lower(), logging.DEBUG))
@staticmethod
def be_quiet():
Logger._logger.setLevel(logging.CRITICAL)
@staticmethod
def be_verbose():
Logger._logger.setLevel(logging.DEBUG)
@staticmethod
def colorize(msg, typ):
header = ''
footer = ''
if sys.stdout.isatty():
header = Logger.COLOR_MAP.get(typ.lower(), '')
footer = colorama.Style.RESET_ALL
return u'{}{}{}'.format(header, msg, footer)
@staticmethod
def error(msg):
return Logger._logger.error(Logger.colorize(msg, 'error'))
@staticmethod
def warn(msg):
return Logger._logger.warn(Logger.colorize(msg, 'warn'))
@staticmethod
def debug(msg):
return Logger._logger.debug(Logger.colorize(msg, 'debug'))
@staticmethod
def info(msg):
return Logger._logger.info(Logger.colorize(msg, 'info'))
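# Hedged usage sketch, not part of the original module: Logger is used as a
# static namespace, so no instance is created; colors are only added when
# stdout is a tty.
if __name__ == '__main__':
    Logger.set_level('debug')
    Logger.debug('resolving cache')
    Logger.info('checking out data')
    Logger.warn('cache is cold')
    Logger.error('checkout failed')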
|
apache-2.0
| -5,935,070,204,051,028,000
| 22.569231
| 85
| 0.609008
| false
| 3.83
| false
| false
| false
|
fzimmermann89/pyload
|
module/plugins/internal/Captcha.py
|
1
|
4078
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import time
from module.plugins.internal.Plugin import Plugin
from module.plugins.internal.utils import encode
class Captcha(Plugin):
__name__ = "Captcha"
__type__ = "captcha"
__version__ = "0.47"
__status__ = "stable"
__description__ = """Base anti-captcha plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def __init__(self, plugin): #@TODO: Pass pyfile instead plugin, so store plugin's html in its associated pyfile as data
self._init(plugin.pyload)
self.plugin = plugin
self.task = None #: captchaManager task
self.init()
def _log(self, level, plugintype, pluginname, messages):
messages = (self.__name__,) + messages
return self.plugin._log(level, plugintype, self.plugin.__name__, messages)
def recognize(self, image):
"""
Extend to build your custom anti-captcha ocr
"""
pass
def decrypt(self, url, get={}, post={}, ref=False, cookies=True, decode=False, req=None,
input_type='jpg', output_type='textual', ocr=True, timeout=120):
img = self.load(url, get=get, post=post, ref=ref, cookies=cookies, decode=decode, req=req or self.plugin.req)
return self.decrypt_image(img, input_type, output_type, ocr, timeout)
def decrypt_image(self, data, input_type='jpg', output_type='textual', ocr=False, timeout=120):
"""
Loads a captcha and decrypts it with ocr, plugin, user input
:param data: image raw data
        :param input_type: file type of the image (e.g. 'jpg')
        :param output_type: 'textual' if text is written on the captcha
                or 'positional' for captchas where the user has to click
                on a specific region of the captcha
        :param ocr: if truthy, OCR is tried first (a string selects a specific
                OCR plugin by name); the user is only asked if OCR fails
        :param timeout: maximum time in seconds to wait for a manual answer
        :return: result of decrypting
"""
result = ""
time_ref = ("%.2f" % time.time())[-6:].replace(".", "")
with open(os.path.join("tmp", "captcha_image_%s_%s.%s" % (self.plugin.__name__, time_ref, input_type)), "wb") as tmp_img:
tmp_img.write(encode(data))
if ocr:
if isinstance(ocr, basestring):
OCR = self.pyload.pluginManager.loadClass("captcha", ocr) #: Rename `captcha` to `ocr` in 0.4.10
result = OCR(self.plugin).recognize(tmp_img.name)
else:
result = self.recognize(tmp_img.name)
if not result:
captchaManager = self.pyload.captchaManager
try:
self.task = captchaManager.newTask(data, input_type, tmp_img.name, output_type)
captchaManager.handleCaptcha(self.task)
self.task.setWaiting(max(timeout, 50)) #@TODO: Move to `CaptchaManager` in 0.4.10
while self.task.isWaiting():
self.plugin.check_status()
time.sleep(1)
finally:
captchaManager.removeTask(self.task)
if self.task.error:
self.fail(self.task.error)
elif not self.task.result:
self.plugin.retry_captcha(msg=_("No captcha result obtained in appropriate time"))
result = self.task.result
if not self.pyload.debug:
try:
os.remove(tmp_img.name)
except OSError, e:
self.log_warning(_("Error removing `%s`") % tmp_img.name, e)
# self.log_info(_("Captcha result: ") + result) #@TODO: Remove from here?
return result
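    # Hedged usage sketch, not part of the original plugin: from a hoster or
    # crypter plugin one would typically do something like
    #   captcha = Captcha(self)
    #   code = captcha.decrypt(captcha_url, input_type='png', timeout=180)
    #   ...submit `code` to the site...
    #   captcha.correct()            # or captcha.invalid() if it was rejected
    # where `captcha_url` is illustrative only.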
def invalid(self):
if not self.task:
return
self.log_warning(_("Invalid captcha"))
self.task.invalid()
def correct(self):
if not self.task:
return
self.log_info(_("Correct captcha"))
self.task.correct()
|
gpl-3.0
| 4,668,200,575,721,839,000
| 30.859375
| 129
| 0.576018
| false
| 3.836312
| false
| false
| false
|
Karaage-Cluster/karaage-debian
|
karaage/plugins/kgapplications/templatetags/applications.py
|
1
|
3655
|
# Copyright 2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
""" Application specific tags. """
import django_tables2 as tables
from django import template
from karaage.people.tables import PersonTable
from ..views.base import get_state_machine
register = template.Library()
@register.simple_tag(takes_context=True)
def application_state(context, application):
""" Render current state of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_state.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_request(context, application):
""" Render current detail of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_request.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_simple_state(context, application):
""" Render current state of application, verbose. """
state_machine = get_state_machine(application)
state = state_machine.get_state(application)
return state.name
@register.inclusion_tag(
'kgapplications/common_actions.html', takes_context=True)
def application_actions(context):
""" Render actions available. """
return {
'roles': context['roles'],
'actions': context['actions'],
'extra': "",
}
@register.tag(name="application_actions_plus")
def do_application_actions_plus(parser, token):
""" Render actions available with extra text. """
nodelist = parser.parse(('end_application_actions',))
parser.delete_first_token()
return ApplicationActionsPlus(nodelist)
class ApplicationActionsPlus(template.Node):
""" Node for rendering actions available with extra text. """
def __init__(self, nodelist):
super(ApplicationActionsPlus, self).__init__()
self.nodelist = nodelist
def render(self, context):
extra = self.nodelist.render(context)
nodelist = template.loader.get_template(
'kgapplications/common_actions.html')
new_context = {
'roles': context['roles'],
'extra': extra,
'actions': context['actions'],
}
output = nodelist.render(new_context)
return output
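# Hedged usage sketch, not part of the original module: in a template the
# paired tag wraps extra markup that is rendered and passed to
# common_actions.html as `extra`, e.g.
#   {% application_actions_plus %}
#     <a href="#">Some extra action</a>
#   {% end_application_actions %}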
@register.assignment_tag(takes_context=True)
def get_similar_people_table(context, applicant):
queryset = applicant.similar_people()
table = PersonTable(
queryset,
empty_text="(No potential duplicates found, please check manually)")
config = tables.RequestConfig(context['request'], paginate={"per_page": 5})
config.configure(table)
return table
|
gpl-3.0
| 2,375,639,057,141,617,700
| 31.061404
| 79
| 0.685636
| false
| 4.106742
| false
| false
| false
|
usc-isi/extra-specs
|
nova/tests/api/openstack/compute/contrib/test_quotas.py
|
1
|
8680
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import quotas
from nova.api.openstack import wsgi
from nova import test
from nova.tests.api.openstack import fakes
def quota_set(id):
return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
'key_pairs': 100}}
class QuotaSetsTest(test.TestCase):
def setUp(self):
super(QuotaSetsTest, self).setUp()
self.controller = quotas.QuotaSetsController()
def test_format_quota_set(self):
raw_quota_set = {
'instances': 10,
'cores': 20,
'ram': 51200,
'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
'gigabytes': 1000,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}
quota_set = self.controller._format_quota_set('1234', raw_quota_set)
qs = quota_set['quota_set']
self.assertEqual(qs['id'], '1234')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['volumes'], 10)
self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], 10)
self.assertEqual(qs['security_group_rules'], 20)
self.assertEqual(qs['key_pairs'], 100)
def test_quotas_defaults(self):
uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
req = fakes.HTTPRequest.blank(uri)
res_dict = self.controller.defaults(req, 'fake_tenant')
expected = {'quota_set': {
'id': 'fake_tenant',
'instances': 10,
'cores': 20,
'ram': 51200,
'volumes': 10,
'gigabytes': 1000,
'floating_ips': 10,
'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100,
}}
self.assertEqual(res_dict, expected)
def test_quotas_show_as_admin(self):
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
use_admin_context=True)
res_dict = self.controller.show(req, 1234)
self.assertEqual(res_dict, quota_set('1234'))
def test_quotas_show_as_unauthorized_user(self):
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
req, 1234)
def test_quotas_update_as_admin(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'volumes': 10,
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
res_dict = self.controller.update(req, 'update_me', body)
self.assertEqual(res_dict, body)
def test_quotas_update_as_user(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
'ram': 51200, 'volumes': 10,
'gigabytes': 1000, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
'key_pairs': 100}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 'update_me', body)
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
'ram': -2, 'volumes': -2,
'gigabytes': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body)
class QuotaXMLSerializerTest(test.TestCase):
def setUp(self):
super(QuotaXMLSerializerTest, self).setUp()
self.serializer = quotas.QuotaTemplate()
self.deserializer = wsgi.XMLDeserializer()
def test_serializer(self):
exemplar = dict(quota_set=dict(
id='project_id',
metadata_items=10,
injected_file_content_bytes=20,
volumes=30,
gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
injected_files=80,
security_groups=10,
security_group_rules=20,
key_pairs=100,
cores=90))
text = self.serializer.serialize(exemplar)
        print(text)
tree = etree.fromstring(text)
self.assertEqual('quota_set', tree.tag)
self.assertEqual('project_id', tree.get('id'))
self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
for child in tree:
self.assertTrue(child.tag in exemplar['quota_set'])
self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])
def test_deserializer(self):
exemplar = dict(quota_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
volumes='30',
gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
injected_files='80',
security_groups='10',
security_group_rules='20',
key_pairs='100',
cores='90'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<quota_set>'
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
'<volumes>30</volumes>'
'<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
'<injected_files>80</injected_files>'
'<security_groups>10</security_groups>'
'<security_group_rules>20</security_group_rules>'
'<key_pairs>100</key_pairs>'
'<cores>90</cores>'
'</quota_set>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
|
apache-2.0
| 7,071,073,384,831,385,000
| 38.634703
| 79
| 0.521429
| false
| 4.135303
| true
| false
| false
|
googleads/google-ads-python
|
examples/shopping_ads/add_shopping_product_listing_group_tree.py
|
1
|
15958
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds a shopping listing group tree to a shopping ad group.
The example will clear an existing listing group tree and rebuild it to include
the following tree structure:
ProductCanonicalCondition NEW $0.20
ProductCanonicalCondition USED $0.10
ProductCanonicalCondition null (everything else)
    ProductBrand CoolBrand $0.90
    ProductBrand CheapBrand $0.01
    ProductBrand null (everything else) $0.05
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
last_criterion_id = 0
def _next_id():
"""Returns a decreasing negative number for temporary ad group criteria IDs.
The ad group criteria will get real IDs when created on the server.
Returns -1, -2, -3, etc. on subsequent calls.
Returns:
The string representation of a negative integer.
"""
global last_criterion_id
last_criterion_id -= 1
return str(last_criterion_id)
# [START add_shopping_product_listing_group_tree]
def main(client, customer_id, ad_group_id, replace_existing_tree):
"""Adds a shopping listing group tree to a shopping ad group.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
replace_existing_tree: Boolean, whether to replace the existing listing
group tree on the ad group. Defaults to false.
"""
# Get the AdGroupCriterionService client.
ad_group_criterion_service = client.get_service("AdGroupCriterionService")
# Optional: Remove the existing listing group tree, if it already exists
# on the ad group. The example will throw a LISTING_GROUP_ALREADY_EXISTS
# error if a listing group tree already exists and this option is not
# set to true.
if replace_existing_tree:
_remove_listing_group_tree(client, customer_id, ad_group_id)
# Create a list of ad group criteria operations.
operations = []
# Construct the listing group tree "root" node.
# Subdivision node: (Root node)
ad_group_criterion_root_operation = _create_listing_group_subdivision(
client, customer_id, ad_group_id
)
# Get the resource name that will be used for the root node.
# This resource has not been created yet and will include the temporary
# ID as part of the criterion ID.
ad_group_criterion_root_resource_name = (
ad_group_criterion_root_operation.create.resource_name
)
operations.append(ad_group_criterion_root_operation)
# Construct the listing group unit nodes for NEW, USED, and other.
product_condition_enum = client.enums.ProductConditionEnum
condition_dimension_info = client.get_type("ListingDimensionInfo")
# Biddable Unit node: (Condition NEW node)
# * Product Condition: NEW
# * CPC bid: $0.20
condition_dimension_info.product_condition.condition = (
product_condition_enum.NEW
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
200_000,
)
)
# Biddable Unit node: (Condition USED node)
# * Product Condition: USED
# * CPC bid: $0.10
condition_dimension_info.product_condition.condition = (
product_condition_enum.USED
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
100_000,
)
)
# Sub-division node: (Condition "other" node)
# * Product Condition: (not specified)
# Note that all sibling nodes must have the same dimension type, even if
# they don't contain a bid.
client.copy_from(
condition_dimension_info.product_condition,
client.get_type("ProductConditionInfo"),
)
ad_group_criterion_other_operation = _create_listing_group_subdivision(
client,
customer_id,
ad_group_id,
ad_group_criterion_root_resource_name,
condition_dimension_info,
)
# Get the resource name that will be used for the condition other node.
# This resource has not been created yet and will include the temporary
# ID as part of the criterion ID.
ad_group_criterion_other_resource_name = (
ad_group_criterion_other_operation.create.resource_name
)
operations.append(ad_group_criterion_other_operation)
# Build the listing group nodes for CoolBrand, CheapBrand, and other.
brand_dimension_info = client.get_type("ListingDimensionInfo")
# Biddable Unit node: (Brand CoolBrand node)
# * Brand: CoolBrand
# * CPC bid: $0.90
brand_dimension_info.product_brand.value = "CoolBrand"
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
900_000,
)
)
# Biddable Unit node: (Brand CheapBrand node)
# * Brand: CheapBrand
# * CPC bid: $0.01
brand_dimension_info.product_brand.value = "CheapBrand"
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
10_000,
)
)
# Biddable Unit node: (Brand other node)
# * CPC bid: $0.05
client.copy_from(
brand_dimension_info.product_brand,
client.get_type("ProductBrandInfo"),
)
operations.append(
_create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
ad_group_criterion_other_resource_name,
brand_dimension_info,
50_000,
)
)
# Add the ad group criteria.
mutate_ad_group_criteria_response = (
ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=operations
)
)
# Print the results of the successful mutates.
print(
"Added ad group criteria for the listing group tree with the "
"following resource names:"
)
for result in mutate_ad_group_criteria_response.results:
print(f"\t{result.resource_name}")
print(f"{len(mutate_ad_group_criteria_response.results)} criteria added.")
# [END add_shopping_product_listing_group_tree]
def _remove_listing_group_tree(client, customer_id, ad_group_id):
"""Removes ad group criteria for an ad group's existing listing group tree.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID from which to remove the listing group
tree.
"""
# Get the GoogleAdsService client.
googleads_service = client.get_service("GoogleAdsService")
print("Removing existing listing group tree...")
# Create a search Google Ads request that will retrieve all listing groups
# where the parent ad group criterion is NULL (and hence the root node in
# the tree) for a given ad group id.
query = f"""
SELECT ad_group_criterion.resource_name
FROM ad_group_criterion
WHERE
ad_group_criterion.type = LISTING_GROUP
AND ad_group_criterion.listing_group.parent_ad_group_criterion IS NULL
AND ad_group.id = {ad_group_id}"""
results = googleads_service.search(customer_id=customer_id, query=query)
ad_group_criterion_operations = []
# Iterate over all rows to find the ad group criteria to remove.
for row in results:
criterion = row.ad_group_criterion
print(
"Found an ad group criterion with resource name: "
f"'{criterion.resource_name}'."
)
ad_group_criterion_operation = client.get_type(
"AdGroupCriterionOperation"
)
ad_group_criterion_operation.remove = criterion.resource_name
ad_group_criterion_operations.append(ad_group_criterion_operation)
if ad_group_criterion_operations:
# Remove the ad group criteria that define the listing group tree.
ad_group_criterion_service = client.get_service(
"AdGroupCriterionService"
)
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=ad_group_criterion_operations
)
print(f"Removed {len(response.results)} ad group criteria.")
def _create_listing_group_subdivision(
client,
customer_id,
ad_group_id,
parent_ad_group_criterion_resource_name=None,
listing_dimension_info=None,
):
"""Creates a new criterion containing a subdivision listing group node.
    If the parent ad group criterion resource name or listing dimension info is
not specified, this method creates a root node.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
parent_ad_group_criterion_resource_name: The string resource name of the
parent node to which this listing will be attached.
listing_dimension_info: A ListingDimensionInfo object containing details
for this listing.
Returns:
An AdGroupCriterionOperation containing a populated ad group criterion.
"""
# Create an ad group criterion operation and populate the criterion.
operation = client.get_type("AdGroupCriterionOperation")
ad_group_criterion = operation.create
# The resource name the criterion will be created with. This will define
# the ID for the ad group criterion.
ad_group_criterion.resource_name = client.get_service(
"AdGroupCriterionService"
).ad_group_criterion_path(customer_id, ad_group_id, _next_id())
ad_group_criterion.status = client.enums.AdGroupCriterionStatusEnum.ENABLED
listing_group_info = ad_group_criterion.listing_group
# Set the type as a SUBDIVISION, which will allow the node to be the
# parent of another sub-tree.
listing_group_info.type_ = client.enums.ListingGroupTypeEnum.SUBDIVISION
# If parent_ad_group_criterion_resource_name and listing_dimension_info
# are not null, create a non-root division by setting its parent and case
# value.
if (
parent_ad_group_criterion_resource_name
        and listing_dimension_info is not None
):
# Set the ad group criterion resource name for the parent listing group.
# This can include a temporary ID if the parent criterion is not yet
# created.
listing_group_info.parent_ad_group_criterion = (
parent_ad_group_criterion_resource_name
)
# Case values contain the listing dimension used for the node.
client.copy_from(listing_group_info.case_value, listing_dimension_info)
return operation
def _create_listing_group_unit_biddable(
client,
customer_id,
ad_group_id,
parent_ad_group_criterion_resource_name,
listing_dimension_info,
cpc_bid_micros=None,
):
"""Creates a new criterion containing a biddable unit listing group node.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
ad_group_id: The ad group ID to which the node will be added.
parent_ad_group_criterion_resource_name: The string resource name of the
parent node to which this listing will be attached.
listing_dimension_info: A ListingDimensionInfo object containing details
for this listing.
cpc_bid_micros: The cost-per-click bid for this listing in micros.
Returns:
An AdGroupCriterionOperation with a populated create field.
"""
# Note: There are two approaches for creating new unit nodes:
# (1) Set the ad group resource name on the criterion (no temporary ID
# required).
# (2) Use a temporary ID to construct the criterion resource name and set
# it to the 'resourceName' attribute.
# In both cases you must set the parent ad group criterion's resource name
# on the listing group for non-root nodes.
# This example demonstrates method (1).
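    # Hedged sketch of method (2), shown for contrast and not executed here; it
    # mirrors what _create_listing_group_subdivision does above:
    #   criterion.resource_name = client.get_service(
    #       "AdGroupCriterionService"
    #   ).ad_group_criterion_path(customer_id, ad_group_id, _next_id())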
operation = client.get_type("AdGroupCriterionOperation")
criterion = operation.create
criterion.ad_group = client.get_service("AdGroupService").ad_group_path(
customer_id, ad_group_id
)
criterion.status = client.enums.AdGroupCriterionStatusEnum.ENABLED
# Set the bid for this listing group unit.
# This will be used as the CPC bid for items that are included in this
# listing group.
if cpc_bid_micros:
criterion.cpc_bid_micros = cpc_bid_micros
listing_group = criterion.listing_group
# Set the type as a UNIT, which will allow the group to be biddable.
listing_group.type_ = client.enums.ListingGroupTypeEnum.UNIT
# Set the ad group criterion resource name for the parent listing group.
# This can have a temporary ID if the parent criterion is not yet created.
listing_group.parent_ad_group_criterion = (
parent_ad_group_criterion_resource_name
)
# Case values contain the listing dimension used for the node.
    if listing_dimension_info is not None:
client.copy_from(listing_group.case_value, listing_dimension_info)
return operation
if __name__ == "__main__":
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description="Add shopping product listing group tree to a shopping ad "
"group."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a",
"--ad_group_id",
type=str,
required=True,
help="The ID of the ad group that will receive the listing group tree.",
)
parser.add_argument(
"-r",
"--replace_existing_tree",
action="store_true",
required=False,
default=False,
help="Optional, whether to replace the existing listing group tree on "
"the ad group if one already exists. Defaults to false.",
)
args = parser.parse_args()
try:
main(
googleads_client,
args.customer_id,
args.ad_group_id,
args.replace_existing_tree,
)
except GoogleAdsException as ex:
print(
f"Request with ID '{ex.request_id}' failed with status "
f"'{ex.error.code().name}' and includes the following errors:"
)
for error in ex.failure.errors:
print(f"\tError with message '{error.message}'.")
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
|
apache-2.0
| 7,351,844,020,958,273,000
| 35.43379
| 80
| 0.665058
| false
| 4.007534
| false
| false
| false
|
devincornell/networkxtimeseries
|
NetTS.py
|
1
|
13522
|
# system imports
import multiprocessing
import pickle
import sys
from itertools import *
# anaconda imports
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class NetTS:
''' Network Time Series '''
### member vars ###
# self.nts - list of networks representing timeseries
# self.N - number of graphs in the timeseries
# self.ts is a timeseries list
def __init__(self, ts, nodes=None, edges=None, type='static_nodes', GraphType=nx.Graph):
ts = list(ts) # ts is a timeseries list
if nodes is not None: nodes = list(nodes) # nodes is a list of node names
if edges is not None: edges = list(edges) # edges is a list of edges
# set timeseries type
if type == 'static_nodes' or type == 'static_structure':
self.type = type
elif type == 'dynamic':
print('Error - choose at least a set of nodes in NetTS init.')
print('Support for dynamic nodes is not supported.')
exit()
else:
print('network type not recognized in NetTs init.')
exit()
# make networks
self.ts = ts
self.N = len(ts)
self.nts = []
for i in range(self.N):
self.nts.append(GraphType(name=ts[i]))
# set nodes
if nodes is not None:
self.nodes = nodes
self.add_nodes(nodes)
else:
self.nodes = list()
# set edges
self.edges = edges
if edges is not None:
for t in self.ts:
for e in edges:
                    self[t].add_edge(*e)
else:
self.edges = list()
self.data = {} # for user data (similar to nx.Graph.graph)
def __str__(self):
return '<NetTs:type=%s,numnodes=%d,numedges=%d>' % (
self.type,
len(self.nodes) if self.nodes is not None else -1,
len(self.edges) if self.edges is not None else -1
)
def __getitem__(self,key):
i = self.ts.index(key)
return self.nts[i]
def save_nts(self,ntsfile):
with open(ntsfile,mode='wb') as f:
data = pickle.dump(self,f)
return
def save_xgmml(self, filename):
        ndf = self.get_node_attr()
        edf = self.get_edge_attr()
with open(filename,'w') as f:
build_xgmml_file(f,ndf,edf)
return
def rm_graph(self, key):
i = self.ts.index(key)
self.nts.pop(i)
self.ts.pop(i)
self.N = len(self.ts)
def add_nodes(self, nodes, t=None):
        ''' Adds the given nodes to every graph in the timeseries,
        or only to the graph at time t if t is given.'''
if t is None:
for t in self.ts:
for n in nodes:
self[t].add_node(n)
else:
#raise(Exception("This functionality hasn't been implemented yet."))
for n in nodes:
self[t].add_node(n)
return
##### Get/Set Graph, Node, and Edge Attributes #####
def get_node_attr(self,t=None,parallel=False):
''' Measure all node attributes across time.
'''
        workers = multiprocessing.cpu_count() if parallel else 1  # map the parallel flag onto time_measure's workers argument
        ndf = self.time_measure(meas_node_attr, meas_obj='nodes', workers=workers)
ndf.sort_index(axis='columns',inplace=True)
return ndf
def get_edge_attr(self,t=None,parallel=False):
''' Measure all edge attributes across time.
'''
        workers = multiprocessing.cpu_count() if parallel else 1  # map the parallel flag onto time_measure's workers argument
        edf = self.time_measure(meas_edge_attr, meas_obj='edges', workers=workers)
edf.sort_index(axis='columns',inplace=True)
return edf
def set_graph_attr(self, t, attrName, gdata):
        ''' Adds an attribute named attrName to every graph in the
        timeseries. gdata is a list of attribute values to apply,
        one per graph, in timeseries order.
        '''
        for i, t in enumerate(self.ts):
            self[t].graph[attrName] = gdata[i]
return
def set_node_attr(self, t, attrName, ndata):
        ''' Adds an attribute to every node in the network
        at time t. Name specified by attrName and data given
        in ndata, a dictionary of node->value pairs.
        '''
        for key, val in ndata.items():
            self[t].node[key][attrName] = val
return
def set_edge_attr(self, t, attrName, edata):
''' Adds an attribute to every edge in the network
at time t. Name specified by attrName and data given
in edata, a dictionary of edge(tuple)->value pairs.
'''
for i,j in edata.keys():
try:
self[t].edge[i][j]
except:
self[t].add_edge(i,j)
self.edges.append((i,j))
self[t].edge[i][j][attrName] = edata[(i,j)]
return
def set_node_attrdf(self, df):
''' Adds node data assuming that edata is a pandas
dataframe formatted with multiindexed columns
(n,attr) and indexed rows with time.
'''
for n in mdf(df.columns,()):
for attr in mdf(df.columns,(n,)):
for t in df.index:
try: self[t].node[n]
except KeyError: self[t].add_node(n)
self[t].node[n][attr] = df.loc[t,(n,attr)]
def set_edge_attrdf(self, df):
        ''' Adds edge data assuming that df is a pandas
dataframe formatted with multiindexed columns
(u,v,attr) and indexed rows with time.
'''
for u in mdf(df.columns,()):
for v in mdf(df.columns,(u,)):
for attr in mdf(df.columns,(u,v)):
for t in df.index:
try:
self[t].edge[u][v]
except KeyError:
self[t].add_edge(u,v)
self[t].edge[u][v][attr] = df.loc[t,(u,v,attr)]
##### Modify the Graphs and Return NetTS #####
def modify_graphs(self, modFunc):
''' Returns a NetTs object where each graph has
been run through modFunc. modFunc
should take a graph and return a modified graph.
'''
outNet = NetTs(self.ts,nodes=self.nodes,edges=self.edges)
        for i, t in enumerate(self.ts):
            outNet.nts[i] = modFunc(self[t])
return outNet
##### Measure Properties of Graphs Over Time #####
def time_measure(self, measFunc, meas_obj='graph', addtnlArgs=list(), workers=1, verb=False):
''' Returns a multiindex dataframe of measurements for all nodes at each
point in time. measFunc should expect a graph object and return a
dictionary with (node,attr) as keys. Output: The index will be a timeseries,
columns will be multi-indexed - first by node name then by attribute.
'''
# error checking
if verb: print('error checking first graph at', meas_obj, 'level.')
        if meas_obj not in ('graph', 'nodes', 'edges'):
            raise ValueError("meas_obj must be 'graph', 'nodes', or 'edges'")
trymeas = measFunc(self.nts[0], *addtnlArgs)
try: dict(trymeas)
except TypeError: print('Error in measure(): measFunc should return a dict'); exit()
if meas_obj == 'nodes' or meas_obj == 'edges':
try: [list(m) for m in trymeas];
except TypeError: print('Error in measure(): measFunc keys should follow (node,attr).'); exit()
if len(trymeas) == 0: # return empty dataframe
return pd.DataFrame()
if meas_obj == 'graph':
cols = list(trymeas.keys())
if verb: print('measuring graphs.')
elif meas_obj == 'nodes':
cols = pd.MultiIndex.from_tuples(trymeas.keys(),names=['node','attr'])
if verb: print('measuring nodes.')
elif meas_obj == 'edges':
cols = pd.MultiIndex.from_tuples(trymeas.keys(),names=['from','to','attr'])
if verb: print('measuring edges.')
df = pd.DataFrame(index=self.ts,columns=cols, dtype=np.float64)
tdata = [(self[t],t,measFunc,addtnlArgs,meas_obj,cols) for t in self.ts]
if workers <= 1:
if verb: print('measuring in one thread.')
meas = map(self._thread_time_measure, tdata)
else:
if verb: print('measuring with', workers, 'cores.')
with multiprocessing.Pool(processes=workers) as p:
meas = p.map(self._thread_time_measure, tdata)
for t,mdf in meas:
df.loc[[t],:] = mdf
df = df.sort_index(axis=1)
return df
def _thread_time_measure(self, dat):
''' This is a thread function that will call measFunc on each
network in the timeseries. measFunc is responsible for returning
a dictionary with (node,attr) keys.
'''
G,t,measFunc,addtnlArgs,meas_obj,cols = dat
meas = measFunc(G, *addtnlArgs)
return t,pd.DataFrame([meas,],index=[t,],columns=cols)
def time_plot(self, *arg, **narg):
meas = self.time_measure(*arg, **narg)
ts = range(len(self.ts))
for col in meas.columns:
plt.plot(ts, meas[col], label=col)
plt.xticks(ts, self.ts)
plt.legend()
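# Hedged usage sketch, not part of the original module: build a two-snapshot
# series over a fixed node set, add edges per snapshot, then measure degrees.
#   nts = NetTS(['t0', 't1'], nodes=['a', 'b', 'c'])
#   nts['t0'].add_edge('a', 'b')
#   nts['t1'].add_edge('a', 'c')
#   deg = nts.time_measure(
#       lambda G: {(n, 'degree'): G.degree(n) for n in G.nodes()},
#       meas_obj='nodes')   # DataFrame indexed by time, (node, attr) columns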
def mdf(mi,match):
''' Returns the list of children of the ordered match
set given by match. Specifically for dataframe looping.
'''
matchfilt = filter(lambda x: x[:len(match)] == match,mi)
return set([x[len(match)] for x in matchfilt])
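# Hedged illustration, not part of the original module: with (node, attr)
# column tuples,
#   mdf([('a', 'deg'), ('a', 'btw'), ('b', 'deg')], ())      -> {'a', 'b'}
#   mdf([('a', 'deg'), ('a', 'btw'), ('b', 'deg')], ('a',))  -> {'deg', 'btw'}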
def from_nts(ntsfilepath):
nts = None
with open(ntsfilepath,'rb') as f:
nts = pickle.load(f)
return nts
##### Standalone Measurement Functions #####
''' These functions are used in the class but not explicitly class
members.
'''
def meas_node_attr(G):
meas = dict()
attrnames = G.nodes(data=True)[0][1].keys() # attr dict from first node
for attrname in attrnames:
attr = nx.get_node_attributes(G,attrname)
meas.update({(n,attrname):attr[n] for n in G.nodes()})
return meas
def meas_edge_attr(G):
meas = dict()
e0 = G.edges()[0]
attrnames = G.get_edge_data(e0[0],e0[1]).keys()
for attrname in attrnames:
attr = nx.get_edge_attributes(G,attrname)
meas.update({(e[0],e[1],attrname):attr[e] for e in G.edges()})
return meas
##### Change Detection Functions #####
def get_value_changes(ds):
''' Takes a data series and outputs (start,val) pairs -
one for each change in the value of the data series.
'''
changes = [(ds.index[0],ds[ds.index[0]])]
for ind in ds.index[1:]:
if ds[ind] != changes[-1][1]:
changes.append((ind,ds[ind]))
return changes
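# Hedged illustration, not part of the original module: for
#   get_value_changes(pd.Series([1, 1, 2, 2, 3]))
# the result is [(0, 1), (2, 2), (4, 3)] - the index where each new value
# first appears, paired with that value.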
##### XGMML File Output Functions #####
def build_xgmml_file(f,ndf,edf):
''' This function builds the xml file given the file object f,
a graph df, node df, and edge df. First it will look at when
attributes change, and then use that to decide when to add an
attribute tag.
'''
t0 = edf.index[0]
tf = edf.index[-1]
f.write(header_str)
f.write(graph_start_str.format(label='mygraph'))
for n in list(set([x[0] for x in ndf.columns])):
values = {'label':str(n),'id':str(n),'start':t0,'end':tf}
f.write(node_start_str.format(**values))
for attr in [x[1] for x in filter(lambda x:x[0]==n,ndf.columns)]:
changes = get_value_changes(ndf.loc[:,(n,attr)])
write_attr(f,attr,changes,tf)
f.write(node_end_str)
for u,v in list(set([x[:2] for x in edf.columns])):
values = {'label':'(%s,%s)'%(str(u),str(v)),'source':str(u),'target':str(v),'start':t0,'end':tf}
f.write(edge_start_str.format(**values))
for attr in [x[2] for x in filter(lambda x:x[:2] == (u,v),edf.columns)]:
changes = get_value_changes(edf.loc[:,(u,v,attr)])
write_attr(f,attr,changes,tf)
f.write(edge_end_str)
f.write(graph_end_str)
return
def write_attr(f,attr,changes,tf):
if type(changes[0][1]) is str:
typ = 'string'
changes = list(map(lambda x: (x[0],str(x[1])), changes))
elif type(changes[0][1]) is int or type(changes[0][1]) is float or type(changes[0][1]) is np.int64 or type(changes[0][1]) is np.float64:
typ = 'real'
changes = list(map(lambda x: (x[0],'{:.9f}'.format(float(x[1]))), changes))
else:
        print('There was an error with the attribute type of the network timeseries:', type(changes[0][1]))
        raise TypeError('Unsupported attribute type for network timeseries: %s' % type(changes[0][1]))
for i in range(len(changes[:-1])):
        if changes[i][1] != 'nan' and changes[i][1] != 'None':
values = {'name':attr,'type':typ,'value':changes[i][1],'start':changes[i][0],'end':changes[i+1][0]}
f.write(attr_str.format(**values))
    if len(changes) == 1 and changes[0][1] != 'None' and changes[0][1] != 'nan':
values = {'name':attr,'type':typ,'value':changes[0][1],'start':changes[0][0],'end':tf}
f.write(attr_str.format(**values))
##### File Output Strings #####
header_str = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Created using the networkxtimeseries library for python. -->\n\n'''
graph_start_str = '<graph label="{label}" directed="0">\n'
graph_end_str = '</graph>\n'
node_start_str = '\t<node label="{label}" id="{id}" start="{start}" end="{end}">\n'
node_end_str = '\t</node>\n'
edge_start_str = '\t<edge label="{label}" source="{source}" target="{target}" start="{start}" end="{end}">\n'
edge_end_str = '\t</edge>\n'
attr_str = '\t\t<att name="{name}" type="{type}" value="{value}" start="{start}" end="{end}"/>\n'
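# Hedged usage sketch (not part of the original module): writing an XGMML file from
# hypothetical node/edge attribute DataFrames shaped like those produced above
# (columns keyed by (node, attr) and (u, v, attr), rows indexed by time).
def _example_build_xgmml_file(ndf, edf, path='mygraph.xgmml'):
    with open(path, 'w') as f:
        build_xgmml_file(f, ndf, edf)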
|
mit
| -2,272,877,648,505,575,000
| 33.402036
| 140
| 0.564867
| false
| 3.559768
| false
| false
| false
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/cnn_util.py
|
1
|
8496
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for CNN benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow.compat.v1 as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
the starts of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
# Implementation adopted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
# generation is needed to deal with spurious wakeups. If self.cond.wait()
    # wakes up for other reasons, generation will force it to go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
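# Hedged usage sketch (not part of the original file): two threads meeting at a
# Barrier; both wait() calls return together once the second one arrives.
def _example_barrier_usage():
  barrier = Barrier(2)
  def worker():
    # per-thread work would go here
    barrier.wait()
  t = threading.Thread(target=worker)
  t.start()
  barrier.wait()  # main thread joins the rendezvous
  t.join()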
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
  supplied to the input pipeline at the same rate as they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.isSet():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
|
apache-2.0
| 2,613,338,284,972,577,300
| 32.581028
| 80
| 0.668432
| false
| 3.828752
| false
| false
| false
|
simodalla/newage
|
newage/views.py
|
1
|
2723
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import ListView, DetailView
from django.utils.translation import ugettext_lazy as _
from .models import RdesktopSession, RdesktopUser
class DeployRdesktopTerminalServerList(ListView):
content_type = 'text/plain'
http_method_names = ['get']
template_name = 'newage/deploy/terminalserver_list.txt'
def render_to_response(self, context, **response_kwargs):
response = super(DeployRdesktopTerminalServerList,
self).render_to_response(context, **response_kwargs)
response['Content-Disposition'] = (
'attachment; filename="terminal_servers.txt"')
return response
def get_queryset(self):
user = get_object_or_404(RdesktopUser,
username__iexact=self.kwargs['username'])
queryset = RdesktopSession.objects.filter(user=user).order_by(
'server__fqdn')
format = self.request.GET.get('format', 'plain')
if format == 'url':
return [self.request.build_absolute_uri(session.get_absolute_url())
for session in queryset]
return [session.server.fqdn.lower() for session in queryset]
def get_context_data(self, **kwargs):
context = super(DeployRdesktopTerminalServerList,
self).get_context_data(**kwargs)
return context
class DeployRdesktopSessionDetail(DetailView):
model = RdesktopSession
content_type = 'text/plain'
http_method_names = ['get']
template_name = 'newage/deploy/rdesktopsession_detail.txt'
def render_to_response(self, context, **response_kwargs):
response = super(DeployRdesktopSessionDetail,
self).render_to_response(context, **response_kwargs)
response['Content-Disposition'] = (
'attachment; filename="redsktop_{}.desktop"'.format(
self.kwargs.get('fqdn')))
return response
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
username = self.kwargs.get('username')
fqdn = self.kwargs.get('fqdn')
try:
obj = queryset.filter(
user__username__iexact=username,
server__fqdn__iexact=fqdn).get()
except ObjectDoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
|
bsd-3-clause
| 3,315,744,594,874,511,000
| 37.352113
| 79
| 0.634594
| false
| 4.294953
| false
| false
| false
|
kansanmuisti/kamu
|
Attic/eduskunta/find-mp-twitter.py
|
1
|
2082
|
#!/usr/bin/env python
import sys
import pickle
from twython import Twython
from django.core.management import setup_environ
sys.path.append('.')
import settings
setup_environ(settings)
from parliament.models import Member, MemberSocialFeed
PICKLE_FILE="mp-twitter.pickle"
twitter = Twython()
def read_twitter_lists():
twitter_lists = ((24404831, 6970755), (17680567, 3656966))
mps = {}
for tw_li in twitter_lists:
args = dict(list_id=tw_li[1], owner_id=tw_li[0], username=tw_li[0],
skip_status=True)
while True:
results = twitter.getListMembers(**args)
users = results['users']
for user in users:
if user['id'] not in mps:
mps[user['id']] = user
print("%s:%s" % (user['name'], user['id']))
cursor = results['next_cursor']
if not cursor:
break
args['cursor'] = cursor
return mps
try:
    f = open(PICKLE_FILE, 'rb')
tw_mps = pickle.load(f)
except IOError:
tw_mps = read_twitter_lists()
    f = open(PICKLE_FILE, 'wb')
pickle.dump(tw_mps, f)
f.close()
MP_TRANSFORM = {
"veltto virtanen": "Pertti Virtanen",
"n. johanna sumuvuori": "Johanna Sumuvuori",
"eeva-johanna elorant": "Eeva-Johanna Eloranta",
"outi alanko-kahiluot": "Outi Alanko-Kahiluoto",
}
print("%d Twitter feeds found" % len(list(tw_mps.keys())))
mp_list = list(Member.objects.all())
for (tw_id, tw_info) in list(tw_mps.items()):
for mp in mp_list:
name = tw_info['name'].lower()
if name in MP_TRANSFORM:
name = MP_TRANSFORM[name].lower()
if mp.get_print_name().lower() == name.lower():
break
else:
print("%s: no match" % tw_info['name'])
continue
try:
feed = MemberSocialFeed.objects.get(member=mp, type='TW', origin_id=tw_id)
except MemberSocialFeed.DoesNotExist:
feed = MemberSocialFeed(member=mp, type='TW', origin_id=tw_id)
feed.account_name = tw_info['screen_name']
feed.save()
|
agpl-3.0
| -8,833,156,142,159,256,000
| 26.76
| 82
| 0.59318
| false
| 3.154545
| false
| false
| false
|
JustinTulloss/harmonize.fm
|
fileprocess/fileprocess/configuration.py
|
1
|
2469
|
# A configuration file for the fileprocess. We could do a .ini, but everybody
# knows python here
import logging
import os
from logging import handlers
config = {
'port': 48260,
'S3.accesskey': '17G635SNK33G1Y7NZ2R2',
'S3.secret': 'PHDzFig4NYRJoKKW/FerfhojljL+sbNyYB9bEpHs',
'S3.music_bucket': 'music.rubiconmusicplayer.com',
'S3.upload': True,
'sqlalchemy.default.convert_unicode': True,
'upload_dir': '../masterapp/tmp',
'media_dir': '../masterapp/media',
'pyfacebook.callbackpath': None,
'pyfacebook.apikey': 'cec673d0ef3fbc12395d0d3500cd72f9',
'pyfacebook.secret': 'a08f822bf3d7f80ee25c47414fe98be1',
'pyfacebook.appid': '2364724122',
'musicdns.key': 'ffa7339e1b6bb1d26593776b4257fce1',
'maxkbps': 192000,
'sqlalchemy.default.url': 'sqlite:///../masterapp/music.db',
'cache_dir': '../masterapp/cache'
}
dev_config = {
'S3.upload': False,
'tagshelf': '../masterapp/tags.archive'
}
test_config = {
'sqlalchemy.default.url': 'sqlite:///:memory:',
'sqlalchemy.reflect.url': 'sqlite:///../../masterapp/music.db',
'upload_dir': './test/testuploaddir',
'media_dir': './test/teststagingdir',
'tagshelf': './test/tagshelf'
}
production_config = {
'S3.upload': True,
'sqlalchemy.default.url': \
'mysql://webappuser:gravelbits@localhost:3306/rubicon',
'sqlalchemy.default.pool_recycle': 3600,
'upload_dir': '/var/opt/stage_uploads',
'media_dir': os.environ.get('MEDIA'),
'tagshelf': '/var/opt/tagshelf.archive',
'cache_dir': '/tmp/stage_cache'
}
live_config = {
'port': 48262,
'upload_dir': '/var/opt/uploads',
'sqlalchemy.default.url': \
'mysql://webappuser:gravelbits@localhost:3306/harmonize',
'cache_dir': '/tmp/live_cache'
}
base_logging = {
'level': logging.INFO,
'format':'%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s',
'datefmt': '%H:%M:%S',
'handler': logging.StreamHandler,
'handler_args': ()
}
dev_logging = {
'level': logging.DEBUG
}
production_logging = {
'level': logging.INFO,
'handler': handlers.TimedRotatingFileHandler,
'handler_args': ('/var/log/rubicon/filepipe', 'midnight', 0, 7)
}
live_logging = {
'handler_args': ('/var/log/harmonize/filepipe', 'midnight', 0, 7)
}
def update_config(nconfig):
global config
config.update(nconfig)
def lupdate_config(nconfig):
global base_logging
    base_logging.update(nconfig)
|
mit
| -6,707,404,347,512,413,000
| 27.056818
| 80
| 0.649656
| false
| 2.84447
| true
| false
| false
|
wger-project/wger
|
wger/weight/api/views.py
|
1
|
1461
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Third Party
from rest_framework import viewsets
# wger
from wger.weight.api.serializers import WeightEntrySerializer
from wger.weight.models import WeightEntry
class WeightEntryViewSet(viewsets.ModelViewSet):
"""
    API endpoint for weight entry objects
"""
serializer_class = WeightEntrySerializer
is_private = True
ordering_fields = '__all__'
filterset_fields = ('date', 'weight')
def get_queryset(self):
"""
Only allow access to appropriate objects
"""
return WeightEntry.objects.filter(user=self.request.user)
def perform_create(self, serializer):
"""
Set the owner
"""
serializer.save(user=self.request.user)
|
agpl-3.0
| 63,408,866,045,461,896
| 30.76087
| 78
| 0.711841
| false
| 4.058333
| false
| false
| false
|
swingr/meeseeks
|
meeseeks.py
|
1
|
1298
|
import pebble as p
import audio
class Meeseeks():
def __init__(self, id="464F", name="Mr. Meeseeks"):
self.id = id
self.name = name
self.pebble = None
self.score = []
def connect(self):
self.pebble = p.Pebble(self.id)
def send(self, msg):
self.pebble.notification_sms(self.name, msg)
def start(self):
self.send("Are you ready to take two strokes off your game! Ohhhhh yeah!")
audio.start()
def shoulders(self):
self.send("Remember to square your shoulders!")
audio.shoulders()
def choke(self):
self.send("Choke up on the club!")
audio.choke()
def existence(self):
self.send("Existence is pain!")
audio.existence()
def frustrating(self):
self.send("Arrgghhhhhhh!")
audio.frustrating()
def head(self):
self.send("Keep your head down!")
audio.head()
def follow(self):
self.send("You gotta follow through!")
audio.follow()
def nice(self):
self.send("NIIIICCCCCEEEE!")
audio.nice()
def short(self):
self.send("What about your short game")
audio.short()
if __name__ == "__main__":
meeseeks = Meeseeks()
meeseeks.connect()
meeseeks.choke()
|
mit
| 8,157,238,295,061,237,000
| 22.178571
| 82
| 0.572419
| false
| 3.415789
| false
| false
| false
|
operasoftware/dragonfly-build-tools
|
df2/codegen/msgdefs.py
|
1
|
3763
|
import os
import sys
import time
import protoparser
import protoobjects
import utils
INDENT = " "
CSS_CLASSES = {
protoobjects.NUMBER: "number",
protoobjects.BUFFER: "string",
protoobjects.BOOLEAN: "boolean",
}
def indent(count): return count * INDENT
def print_doc(file, field, depth):
if field.doc:
file.write("%s%s" % (indent(depth), "<span class=\"comment\">/**\n"))
for line in field.doc_lines:
file.write("%s%s%s\n" % (indent(depth), " * ", line.replace("&", "&").replace("<", "<")))
file.write(indent(depth) + " */</span>\n")
def print_enum(file, enum, depth=0):
file.write("%s{\n" % indent(depth))
depth += 1
for f in enum.fields:
print_doc(file, f, depth)
args = indent(depth), f.name, f.key
file.write("%s<span class=\"enum\">%s</span> = %s;\n" % args)
depth -= 1
file.write("%s}\n" % (indent(depth)))
def print_message(file, msg, include_message_name=True, depth=0, recurse_list=[]):
if include_message_name:
file.write("%smessage <span class=\"message\">%s</span>\n" % (indent(depth), msg.name))
file.write("%s{\n" % indent(depth))
depth += 1
for field in msg.fields:
f_type = field.type
print_doc(file, field, depth)
if f_type.sup_type in CSS_CLASSES:
args = indent(depth), field.q, CSS_CLASSES[f_type.sup_type], field.full_type_name, field.name, field.key
file.write("%s%s <span class=\"%s\">%s</span> %s = %s" % args)
else:
args = indent(depth), field.q, field.full_type_name, field.name, field.key
file.write("%s%s %s %s = %s" % args)
if hasattr(field.options, "default"):
file.write(" [default = %s]" % field.options.default.value)
file.write(";\n")
if f_type.sup_type == protoobjects.MESSAGE:
if not f_type in recurse_list:
print_message(file, f_type, False, depth, recurse_list[:] + [field.type])
if field.type.sup_type == protoobjects.ENUM:
print_enum(file, field.type, depth)
depth -= 1
file.write("%s}\n" % (indent(depth)))
def print_msg_def(dest, service, type, command_or_event, message):
service_name = service.name
version = service.options.version.value.strip("\"")
file_name = "%s.%s.%s.%s.def" % (service_name, version, type, command_or_event.name)
with open(os.path.join(dest, file_name), "wb") as file:
print_message(file, message)
def print_msg_defs(proto_path, dest):
with open(proto_path, "rb") as proto_file:
global_scope = protoparser.parse(proto_file.read())
for c in global_scope.service.commands:
print_msg_def(dest, global_scope.service, "commands", c, c.request_arg)
print_msg_def(dest, global_scope.service, "responses", c, c.response_arg)
for e in global_scope.service.events:
print_msg_def(dest, global_scope.service, "events", e, e.response_arg)
def msg_defs(args):
if not os.path.exists(args.dest): os.mkdir(args.dest)
if os.path.isfile(args.src):
        print_msg_defs(args.src, args.dest)
elif os.path.isdir(args.src):
for path in utils.get_proto_files(args.src):
print_msg_defs(path, args.dest)
def setup_subparser(subparsers, config):
subp = subparsers.add_parser("msg-defs", help="Create html documentation.")
subp.add_argument("src", nargs="?", default=".", help="""proto file or directory (default: %(default)s)).""")
subp.add_argument("dest", nargs="?", default="msg-defs", help="the destination directory (default: %(default)s)).")
subp.set_defaults(func=msg_defs)
|
apache-2.0
| 5,219,118,924,902,811,000
| 40.280899
| 120
| 0.594738
| false
| 3.258009
| false
| false
| false
|
mudragada/util-scripts
|
PyProblems/CodeSignal/uberShortestDistance.py
|
1
|
3276
|
#Consider a city where the streets are perfectly laid out to form an infinite square grid.
#In this city finding the shortest path between two given points (an origin and a destination) is much easier than in other more complex cities.
#As a new Uber developer, you are tasked to create an algorithm that does this calculation.
#
#Given user's departure and destination coordinates, each of them located on some street,
# find the length of the shortest route between them assuming that cars can only move along the streets.
# Each street can be represented as a straight line defined by the x = n or y = n formula, where n is an integer.
#
#Example
#
#For departure = [0.4, 1] and destination = [0.9, 3], the output should be
#perfectCity(departure, destination) = 2.7.
#
#0.6 + 2 + 0.1 = 2.7, which is the answer.
#
#Input/Output
#
#[execution time limit] 4 seconds (py3)
#
#[input] array.float departure
#
#An array [x, y] of x and y coordinates. It is guaranteed that at least one coordinate is integer.
#
#Guaranteed constraints:
#0.0 ≤ departure[i] ≤ 10.0.
#
#[input] array.float destination
#
#An array [x, y] of x and y coordinates. It is guaranteed that at least one coordinate is integer.
#
#Guaranteed constraints:
#0.0 ≤ destination[i] ≤ 10.0.
#
#[output] float
#
#The shortest distance between two points along the streets.
import math
def main():
departure = [0.4, 1]
destination = [0.9, 3]
print(perfectCity(departure, destination))
departure = [2.4, 1]
destination = [5, 7.3]
print(perfectCity(departure, destination))
departure = [0, 0.2]
destination = [7, 0.5]
print(perfectCity(departure, destination))
departure = [0.9, 6]
destination = [1.1, 5]
print(perfectCity(departure, destination))
departure = [0, 0.4]
destination = [1, 0.6]
print(perfectCity(departure, destination))
def perfectCity(departure, destination):
print(departure, destination)
x1 = departure[0]
x2 = destination[0]
y1 = departure[1]
y2 = destination[1]
xDist = 0
yDist = 0
if(int(x1) > int(x2)):
xDist = x1 - math.floor(x1) + math.ceil(x2) - x2
elif(int(x1) < int(x2)):
xDist = math.ceil(x1) - x1 + x2 - math.floor(x2)
elif(int(x1) == int(x2) and (x1+x2-int(x1)-int(x2)) <=1):
xDist = x1-math.floor(x1) + x2-math.floor(x2)
else:
xDist = math.ceil(x1)-x1 + math.ceil(x2)-x2
print("X Distance = " + str(xDist))
if(int(y1) > int(y2)):
if(isinstance(y1, int)):
y1x = y1
else:
y1x = y1 - math.floor(y1)
if(isinstance(y2, int)):
y2x = -y2
else:
y2x = math.ceil(y2) - y2
yDist = y1x + y2x
elif(int(y1) < int(y2)):
if(isinstance(y1, int)):
y1x = -y1
else:
y1x = math.ceil(y1) - y1
if(isinstance(y2, int)):
y2x = y2
else:
y2x = y2 - math.floor(y2)
yDist = y1x + y2x
    elif(int(y1) == int(y2) and (y1+y2-int(y1)-int(y2)) <=1):
yDist = y1-math.floor(y1) + y2-math.floor(y2)
else:
yDist = math.ceil(y1)-y1 + math.ceil(y2)-y2
print("Y Distance = " + str(yDist))
return xDist + yDist
if __name__ == '__main__':
main()
|
mit
| 2,286,087,225,133,462,500
| 27.920354
| 144
| 0.612607
| false
| 2.87423
| false
| false
| false
|
kbase/auth_service
|
python-libs/oauth.py
|
1
|
8778
|
import logging
import httplib2
import json
import os
import hashlib
# This module performs authentication based on the tokens
# issued by Globus Online's Nexus service, see this URL for
# details:
# http://globusonline.github.com/nexus-docs/api.html
#
# Import the Globus Online client libraries, originally
# sourced from:
# https://github.com/globusonline/python-nexus-client
from nexus import Client
from django.contrib.auth.models import AnonymousUser,User
from django.contrib.auth import login,authenticate
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from pprint import pformat
"""
This is the 2-legged OAuth authentication code from tastypie
heavily modified into a django authentication middleware.
We base this on RemoteUserMiddleware so that we can get access to the
request object to have access to the request headers, and then we
simply re-use the existing remote user backend code
https://docs.djangoproject.com/en/1.4/howto/auth-remote-user/
You configure it the same way using the normal instructions, except
that you use this module oauth.TwoLeggedOAuthMiddleware instead of
django.contrib.auth.middleware.RemoteUserMiddleware
The django.contrib.auth.backends.RemoteUserBackend module is also
used with this module, add it into the AUTHENTICATION_BACKENDS
declaration in settings.py
To set the authentiction service to be used, set AUTHSVC in your
settings.py file. Here is an example:
AUTHSVC = 'https://graph.api.go.sandbox.globuscs.info/'
Django modules can check the request.META['KBASEsessid'] for the
session ID that will be used within the KBase session management
infrastructure
To test this, bind the sample handler into urls.py like this:
...
from oauth import AuthStatus
...
urlpatterns = patterns( ...
...
url(r'^authstatus/?$', AuthStatus),
...
)
Then visit the authstatus URL to see the auth state.
If you have the perl Bio::KBase::AuthToken libraries installed,
you can test it like this:
token=`perl -MBio::KBase::AuthToken -e 'print Bio::KBase::AuthToken->new( user_id => "papa", password => "papa")->token,"\n";'`
curl -H "Authorization: Bearer $token" http://127.0.0.1:8000/authstatus/
Steve Chan
sychan@lbl.gov
9/6/2012
Previous documentation follows:
This is a simple 2-legged OAuth authentication model for tastypie.
Copied nearly verbatim from gregbayer's piston example
- https://github.com/gregbayer/django-piston-two-legged-oauth
Dependencies:
- python-oauth2: https://github.com/simplegeo/python-oauth2
Adapted from example:
- http://philipsoutham.com/post/2172924723/two-legged-oauth-in-python
"""
class OAuth2Middleware(AuthenticationMiddleware):
"""
Two Legged OAuth authenticator.
This Authentication method checks for a provided HTTP_AUTHORIZATION
and looks up to see if this is a valid OAuth Consumer
"""
# Authentication server
# Create a Python Globus client
client = Client(config_file=os.path.join(os.path.dirname(__file__), 'nexus/nexus.yml'))
try:
authsvc = "https://%s/" % client.config['server']
except:
authsvc = 'https://nexus.api.globusonline.org/'
# Set the salt used for computing a session hash from the signature hash
salt = "(African || European)?"
def __init__(self, realm='API'):
self.realm = realm
self.user = None
self.http = httplib2.Http(disable_ssl_certificate_validation=True)
# The shortcut option will bypass token validation if we already have a django session
self.shortcut = False
def process_request(self, request):
"""
Verify 2-legged oauth request. Parameters accepted as
values in "Authorization" header, or as a GET request
or in a POST body.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
if 'HTTP_AUTHORIZATION' in request.META:
auth_header = request.META.get('HTTP_AUTHORIZATION')
else:
logging.debug("No authorization header found.")
return None
# Extract the token based on whether it is an OAuth or Bearer
# token
if auth_header[:6] == 'OAuth ':
token = auth_header[6:]
elif auth_header[:7] == 'Bearer ':
token = auth_header[7:]
else:
logging.info("Authorization header did not contain OAuth or Bearer type token")
return None
# Push the token into the META for future reference
request.META['KBASEtoken'] = token
if (request.user.is_authenticated() and self.shortcut):
return
user_id = OAuth2Middleware.client.authenticate_user( token)
if not user_id:
logging.error("Authentication token failed validation")
return None
else:
logging.info("Validated as user " + user_id)
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
profile = self.get_profile(token)
if (profile == None):
logging.error("Token validated, but could not retrieve user profile")
return None
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
# See if the username is already associated with any currently logged
# in user, if so just pass over the rest
# Raises exception if it doesn't pass
user = authenticate(remote_user=profile['username'])
if user:
request.user = user
# For now, compute a sessionid based on hashing the
# the signature with the salt
request.META['KBASEsessid'] = hashlib.sha256(token_map['sig']+OAuth2Middleware.salt).hexdigest()
print pformat( request.META['KBASEsessid'])
# Add in some useful details that came in from the token validation
request.META['KBASEprofile'] = profile
login(request,user)
else:
logging.error( "Failed to return user from call to authenticate() with username " + profile['username'])
except KeyError, e:
logging.exception("KeyError in TwoLeggedOAuthMiddleware: %s" % e)
request.user = AnonymousUser()
except Exception, e:
logging.exception("Error in TwoLeggedOAuthMiddleware: %s" % e)
def get_profile(self,token):
try:
token_map = {}
for entry in token.split('|'):
key, value = entry.split('=')
token_map[key] = value
keyurl = self.__class__.authsvc + "/users/" + token_map['un'] + "?custom_fields=*"
res,body = self.http.request(keyurl,"GET",
headers={ 'Authorization': 'Globus-Goauthtoken ' + token })
if (200 <= int(res.status)) and ( int(res.status) < 300):
profile = json.loads( body)
return profile
logging.error( body)
raise Exception("HTTP", res)
except Exception, e:
logging.exception("Error in get_profile.")
return None
def AuthStatus(request):
res = "request.user.is_authenticated = %s \n" % request.user.is_authenticated()
if request.user.is_authenticated():
res = res + "request.user.username = %s\n" % request.user.username
if 'KBASEsessid' in request.META:
res = res + "Your KBase SessionID is %s\n" % request.META['KBASEsessid']
if 'KBASEprofile' in request.META:
res = res + "Your profile record is:\n%s\n" % pformat( request.META['KBASEprofile'])
if 'KBASEtoken' in request.META:
res = res + "Your OAuth token is:\n%s\n" % pformat( request.META['KBASEtoken'])
return HttpResponse(res)
|
mit
| -9,132,790,155,269,310,000
| 39.451613
| 127
| 0.640009
| false
| 4.244681
| false
| false
| false
|
SecuredByTHEM/ndr-server
|
ndr_server/recorder.py
|
1
|
5041
|
#!/usr/bin/python3
# Copyright (C) 2017 Secured By THEM
# Original Author: Michael Casadevall <mcasadevall@them.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Representation of a recorder'''
import datetime
import time
import ndr
import ndr_server
class Recorder(object):
    '''A recorder is a system running the NDR package and represents a source of data'''
def __init__(self, config):
self.config = config
self.pg_id = None
self.site_id = None
self.human_name = None
self.hostname = None
self.enlisted_at = None
self.last_seen = None
self.image_build_date = None
self.image_type = None
def __eq__(self, other):
# Recorders equal each other if the pg_id matches each other
# since its the same record in the database
if self.pg_id is None:
return False
return self.pg_id == other.pg_id
@classmethod
def create(cls, config, site, human_name, hostname, db_conn=None):
'''Creates the recorder within the database'''
recorder = cls(config)
recorder.human_name = human_name
recorder.hostname = hostname
recorder.site_id = site.pg_id
recorder.enlisted_at = time.time()
recorder.last_seen = recorder.enlisted_at
recorder.pg_id = config.database.run_procedure_fetchone(
"admin.insert_recorder", [site.pg_id, human_name, hostname],
existing_db_conn=db_conn)[0]
return recorder
def from_dict(self, recorder_dict):
        '''Deserializes a recorder from a dictionary'''
self.human_name = recorder_dict['human_name']
self.hostname = recorder_dict['hostname']
self.site_id = recorder_dict['site_id']
self.pg_id = recorder_dict['id']
self.image_build_date = recorder_dict['image_build_date']
self.image_type = recorder_dict['image_type']
return self
def get_site(self, db_conn=None):
'''Gets the site object for this recorder'''
return ndr_server.Site.read_by_id(self.config, self.site_id, db_conn)
def set_recorder_sw_revision(self, image_build_date, image_type, db_conn):
'''Sets the recorder's software revision, and image type and updates the database
with that information'''
# Make sure we have an integer coming in
image_build_date = int(image_build_date)
self.config.database.run_procedure("admin.set_recorder_sw_revision",
[self.pg_id, image_build_date, image_type],
existing_db_conn=db_conn)
self.image_build_date = image_build_date
self.image_type = image_type
def get_message_ids_recieved_in_time_period(self,
message_type: ndr.IngestMessageTypes,
start_period: datetime.datetime,
end_period: datetime.datetime,
db_conn):
        '''Retrieves message IDs received for a given period. Returns None
        if no ids are found'''
message_ids = self.config.database.run_procedure_fetchone(
"admin.get_recorder_message_ids_recieved_in_period",
[self.pg_id,
message_type.value,
start_period,
end_period],
existing_db_conn=db_conn)[0]
return message_ids
@classmethod
def read_by_id(cls, config, recorder_id, db_conn=None):
        '''Loads a recorder by ID number'''
rec = cls(config)
return rec.from_dict(config.database.run_procedure_fetchone(
"ingest.select_recorder_by_id", [recorder_id], existing_db_conn=db_conn))
@classmethod
def read_by_hostname(cls, config, hostname, db_conn=None):
        '''Loads a recorder based on its hostname in the database'''
rec = cls(config)
return rec.from_dict(config.database.run_procedure_fetchone(
"ingest.select_recorder_by_hostname", [hostname], existing_db_conn=db_conn))
@staticmethod
def get_all_recorder_names(config, db_conn=None):
'''Returns a list of all recorder names in the database'''
return config.database.run_procedure(
"admin.get_all_recorder_names", [], existing_db_conn=db_conn)
|
agpl-3.0
| 4,874,409,016,993,405,000
| 38.077519
| 89
| 0.620115
| false
| 3.938281
| true
| false
| false
|
phildini/django-invitations
|
invitations/models.py
|
1
|
2846
|
import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from allauth.account.adapter import DefaultAccountAdapter
from allauth.account.adapter import get_adapter
from .managers import InvitationManager
from .app_settings import app_settings
from . import signals
@python_2_unicode_compatible
class Invitation(models.Model):
email = models.EmailField(unique=True, verbose_name=_('e-mail address'))
accepted = models.BooleanField(verbose_name=_('accepted'), default=False)
created = models.DateTimeField(verbose_name=_('created'),
default=timezone.now)
key = models.CharField(verbose_name=_('key'), max_length=64, unique=True)
sent = models.DateTimeField(verbose_name=_('sent'), null=True)
objects = InvitationManager()
@classmethod
def create(cls, email):
key = get_random_string(64).lower()
instance = cls._default_manager.create(
email=email,
key=key)
return instance
def key_expired(self):
expiration_date = (
self.sent + datetime.timedelta(
days=app_settings.INVITATION_EXPIRY))
return expiration_date <= timezone.now()
def send_invitation(self, request, **kwargs):
current_site = (kwargs['site'] if 'site' in kwargs
else Site.objects.get_current())
invite_url = reverse('invitations:accept-invite',
args=[self.key])
invite_url = request.build_absolute_uri(invite_url)
ctx = {
'invite_url': invite_url,
'site_name': current_site.name,
'email': self.email,
'key': self.key,
}
email_template = 'invitations/email/email_invite'
get_adapter().send_mail(
email_template,
self.email,
ctx)
self.sent = timezone.now()
self.save()
signals.invite_url_sent.send(
sender=self.__class__,
instance=self,
invite_url_sent=invite_url,
inviter=request.user)
def __str__(self):
return "Invite: {0}".format(self.email)
class InvitationsAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request):
if hasattr(request, 'session') and request.session.get('account_verified_email'):
return True
elif app_settings.INVITATION_ONLY is True:
# Site is ONLY open for invites
return False
else:
# Site is open to signup
return True
|
gpl-3.0
| -2,722,871,065,632,220,700
| 31.340909
| 89
| 0.625439
| false
| 4.166911
| false
| false
| false
|
massimo-nocentini/competitive-programming
|
UVa/1062.py
|
1
|
2541
|
#_________________________________________________________________________
import fileinput
from contextlib import contextmanager
@contextmanager
def line_bind(line, *ctors, splitter=lambda l: l.split(' '), do=None):
'''
Split `line` argument producing an iterable of mapped elements, in the sense of `ctors`.
Keyword argument `splitter` splits the given `line` respect `space` (' ')
character; however, it is possible to provide the desired behavior providing
a custom lambda expression of one parameter, eventually instantiated with `line`.
The iterable produced by `splitter` should match argument `ctors` in length;
if this holds, an iterable of mapped elements is produced, composed of elements
built by application of each function in `ctors` to element in the split, pairwise.
On the other hand, mapping happens according to the rules of `zip` if lengths differ.
Keyword argument `do` is an higher order operator, defaults to `None`: if
given, it should be a function that receive the generator, which is returned, otherwise.
Moreover, the returned iterable object is a generator, so a linear scan of the line
*is not* performed, hence there is no need to consume an higher order operator to
be applied during the scan, this provide good performances at the same time.
'''
g = (c(v) for c, v in zip(ctors, splitter(line)))
yield do(g) if do else g
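# Hedged usage sketch (not part of the original solution): parsing one line into
# typed values with line_bind.
def _example_line_bind():
    with line_bind('3 4.5 foo', int, float, str) as (a, b, c):
        assert (a, b, c) == (3, 4.5, 'foo')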
@contextmanager
def stdin_input(getter=lambda: fileinput.input(), raw_iter=False):
'''
Produces a way to fetch lines by a source.
Keyword argument `getter` should be a thunk that produces an iterable, call it `i`;
by default, it produces the iterator which reads from standard input.
Keyword argument `raw_iter` is a boolean. If it is `True`, that iterator `i` is
returned as it is; otherwise, a thunk is returned which wraps the application `next(i)`.
'''
iterable = getter()
yield iterable if raw_iter else (lambda: next(iterable))
#________________________________________________________________________
with stdin_input() as next_line:
from itertools import count
for i in count(1):
containers = next_line()
if containers == 'end\n': break
stacks = []
for c in containers:
for s in stacks:
if c <= s[-1]:
s.append(c)
break
else:
stacks.append([c])
print("Case {}: {}".format(i, len(stacks)))
|
mit
| -4,782,286,211,673,975,000
| 34.291667
| 92
| 0.623377
| false
| 4.513321
| false
| false
| false
|
markvdw/mltools
|
mltools/optimise_scg.py
|
1
|
5939
|
# Copyright I. Nabney, N.Lawrence and James Hensman (1996 - 2014)
# Scaled Conjuagte Gradients, originally in Matlab as part of the Netlab toolbox by I. Nabney, converted to python N. Lawrence and given a pythonic interface by James Hensman
# Modified from GPy SCG optimisation
from __future__ import print_function
import numpy as np
import sys
def print_out(len_maxiters, fnow, current_grad, beta, iteration):
print('\r', end=' ')
print('{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
def exponents(fnow, current_grad):
exps = [np.abs(np.float(fnow)), current_grad]
return np.sign(exps) * np.log10(exps).astype(int)
def SCG(f, gradf, x, optargs=(), callback=None, maxiter=500, max_f_eval=np.inf, display=True, xtol=None, ftol=None, gtol=None):
"""
Optimisation through Scaled Conjugate Gradients (SCG)
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
    Returns
    x : the optimised value of x
    status : a string describing the convergence status
"""
if xtol is None:
xtol = 1e-6
if ftol is None:
ftol = 1e-6
if gtol is None:
gtol = 1e-5
sigma0 = 1.0e-7
fold = f(x, *optargs) # Initial function value.
function_eval = 1
fnow = fold
gradnew = gradf(x, *optargs) # Initial gradient.
function_eval += 1
#if any(np.isnan(gradnew)):
# raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
current_grad = np.dot(gradnew, gradnew)
gradold = gradnew.copy()
d = -gradnew # Initial search direction.
success = True # Force calculation of directional derivs.
nsuccess = 0 # nsuccess counts number of successes.
beta = 1.0 # Initial scale parameter.
betamin = 1.0e-15 # Lower bound on scale.
betamax = 1.0e15 # Upper bound on scale.
status = "Not converged"
iteration = 0
len_maxiters = len(str(maxiter))
if display:
print(' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters))
exps = exponents(fnow, current_grad)
p_iter = iteration
# Main optimization loop.
while iteration < maxiter:
# Calculate first and second directional derivatives.
if success:
mu = np.dot(d, gradnew)
if mu >= 0:
d = -gradnew
mu = np.dot(d, gradnew)
kappa = np.dot(d, d)
sigma = sigma0 / np.sqrt(kappa)
xplus = x + sigma * d
gplus = gradf(xplus, *optargs)
function_eval += 1
theta = np.dot(d, (gplus - gradnew)) / sigma
# Increase effective curvature and evaluate step size alpha.
delta = theta + beta * kappa
if delta <= 0:
delta = beta * kappa
beta = beta - theta / kappa
alpha = -mu / delta
# Calculate the comparison ratio.
xnew = x + alpha * d
fnew = f(xnew, *optargs)
function_eval += 1
if function_eval >= max_f_eval:
status = "maximum number of function evaluations exceeded"
break
Delta = 2.*(fnew - fold) / (alpha * mu)
if Delta >= 0.:
success = True
nsuccess += 1
x = xnew
fnow = fnew
else:
success = False
fnow = fold
# Store relevant variables
if callback is not None:
callback(x, fval=fnow, gval=gradnew)
iteration += 1
if display:
print_out(len_maxiters, fnow, current_grad, beta, iteration)
n_exps = exponents(fnow, current_grad)
if iteration - p_iter >= 20 * np.random.rand():
a = iteration >= p_iter * 2.78
b = np.any(n_exps < exps)
if a or b:
p_iter = iteration
print('')
if b:
exps = n_exps
if success:
# Test for termination
if (np.abs(fnew - fold) < ftol):
status = 'converged - relative reduction in objective'
break
elif (np.max(np.abs(alpha * d)) < xtol):
status = 'converged - relative stepsize'
break
else:
# Update variables for new position
gradold = gradnew
gradnew = gradf(x, *optargs)
function_eval += 1
current_grad = np.dot(gradnew, gradnew)
fold = fnew
# If the gradient is zero then we are done.
if current_grad <= gtol:
status = 'converged - relative reduction in gradient'
break
# Adjust beta according to comparison ratio.
if Delta < 0.25:
beta = min(4.0 * beta, betamax)
if Delta > 0.75:
beta = max(0.25 * beta, betamin)
# Update search direction using Polak-Ribiere formula, or re-start
# in direction of negative gradient after nparams steps.
if nsuccess == x.size:
d = -gradnew
beta = 1. # This is not in the original paper
nsuccess = 0
elif success:
Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
d = Gamma * d - gradnew
else:
# If we get here, then we haven't terminated in the given number of
# iterations.
status = "maxiter exceeded"
if display:
print_out(len_maxiters, fnow, current_grad, beta, iteration)
print("")
print(status)
return x, status
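# Hedged usage sketch (not part of the original module): minimising a simple quadratic
# with SCG; the optimum of f(x) = x.x is the zero vector.
def _example_scg():
    f = lambda x: np.dot(x, x)
    gradf = lambda x: 2.0 * x
    x_opt, status = SCG(f, gradf, np.ones(3), display=False)
    return x_opt, status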
|
mit
| -192,725,720,858,400,350
| 32.937143
| 228
| 0.555144
| false
| 3.704928
| false
| false
| false
|