**Dataset columns:** `repo_name` (string, 5–100 chars) | `ref` (string, 12–67 chars) | `path` (string, 4–244 chars) | `copies` (string, 1–8 chars) | `content` (string, 0–1.05M chars, nullable)
**repo:** akvo/django-registration | **ref:** refs/heads/master | **path:** registration/admin.py | **copies:** 107
```python
from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _

from registration.models import RegistrationProfile


class RegistrationAdmin(admin.ModelAdmin):
    actions = ['activate_users', 'resend_activation_email']
    list_display = ('user', 'activation_key_expired')
    raw_id_fields = ['user']
    search_fields = ('user__username', 'user__first_name', 'user__last_name')

    def activate_users(self, request, queryset):
        """
        Activates the selected users, if they are not already
        activated.
        """
        for profile in queryset:
            RegistrationProfile.objects.activate_user(profile.activation_key)
    activate_users.short_description = _("Activate users")

    def resend_activation_email(self, request, queryset):
        """
        Re-sends activation emails for the selected users.

        Note that this will *only* send activation emails for users
        who are eligible to activate; emails will not be sent to users
        whose activation keys have expired or who have already
        activated.
        """
        if Site._meta.installed:
            site = Site.objects.get_current()
        else:
            site = RequestSite(request)
        for profile in queryset:
            if not profile.activation_key_expired():
                profile.send_activation_email(site)
    resend_activation_email.short_description = _("Re-send activation emails")


admin.site.register(RegistrationProfile, RegistrationAdmin)
```
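Both actions follow Django's standard admin-action signature `(modeladmin, request, queryset)`, so they are triggered from the changelist page. A hedged sketch of driving `activate_users` from a test: the `client` and `profile` fixtures are hypothetical, while `action` and `_selected_action` are Django's stock action-form field names.

```python
# Hypothetical fixtures: `client` is logged in as a superuser, `profile` is a
# RegistrationProfile for a not-yet-activated user.
data = {
    "action": "activate_users",              # name listed in RegistrationAdmin.actions
    "_selected_action": [str(profile.pk)],   # Django's action checkbox field name
}
# POSTing to the changelist runs RegistrationAdmin.activate_users(request, queryset).
client.post("/admin/registration/registrationprofile/", data)
```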
**repo:** telerik/cloudbase-init | **ref:** refs/heads/master | **path:** cloudbaseinit/plugins/windows/sethostname.py | **copies:** 1
```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins import base
from cloudbaseinit.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class SetHostNamePlugin(base.BasePlugin):
    def execute(self, service, shared_data):
        meta_data = service.get_meta_data('openstack')
        if 'hostname' not in meta_data:
            LOG.debug('Hostname not found in metadata')
            return (base.PLUGIN_EXECUTION_DONE, False)

        osutils = osutils_factory.OSUtilsFactory().get_os_utils()
        new_host_name = meta_data['hostname'].split('.', 1)[0]
        reboot_required = osutils.set_host_name(new_host_name)
        return (base.PLUGIN_EXECUTION_DONE, reboot_required)
```
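The plugin's only transformation is trimming a fully qualified hostname down to its short name before handing it to the OS layer. A self-contained sketch of that step, with a stand-in dict instead of a real metadata service:

```python
# Stand-in for service.get_meta_data('openstack'); not a real metadata call.
meta_data = {'hostname': 'web01.example.com'}

# split('.', 1)[0] keeps only the leading label of an FQDN.
new_host_name = meta_data['hostname'].split('.', 1)[0]
print(new_host_name)  # web01
```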
**repo:** impulse-cloud/django-crispy-forms | **ref:** refs/heads/dev | **path:** crispy_forms/base.py | **copies:** 9
```python
class KeepContext(object):
    """
    Context manager that receives a `django.template.Context` instance and a list of keys.

    Once the context manager is exited, it removes `keys` from the context, to avoid
    side effects in later layout objects that may use the same context variables.

    Layout objects should use `extra_context` to introduce context variables, never
    touch the context object themselves, as that could introduce side effects.
    """
    def __init__(self, context, keys):
        self.context = context
        self.keys = keys

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        for key in list(self.keys):
            del self.context[key]
```
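Because `__exit__` deletes the listed keys unconditionally, any variable introduced for one layout object disappears before the next one renders. A self-contained illustration, using a plain dict as a stand-in for `django.template.Context` (an assumption; the real Context is a stack of dicts with the same item-deletion protocol):

```python
class KeepContext(object):
    # Same logic as above, reproduced so this sketch runs standalone.
    def __init__(self, context, keys):
        self.context, self.keys = context, keys
    def __enter__(self):
        pass
    def __exit__(self, type, value, traceback):
        for key in list(self.keys):
            del self.context[key]

context = {'form_style': 'default'}           # plain dict standing in for Context
context['wrapper_class'] = 'row'              # introduced via extra_context
with KeepContext(context, keys=['wrapper_class']):
    assert context['wrapper_class'] == 'row'  # visible while rendering
assert 'wrapper_class' not in context         # removed on exit: nothing leaks
```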
**repo:** onecloud/neutron | **ref:** refs/heads/master | **path:** neutron/tests/unit/nec/test_config.py | **copies:** 8
```python
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU

from neutron.plugins.nec.common import config
from neutron.tests import base


class ConfigurationTest(base.BaseTestCase):

    def test_defaults(self):
        self.assertEqual('br-int', config.CONF.OVS.integration_bridge)
        self.assertEqual(2, config.CONF.AGENT.polling_interval)
        self.assertEqual('sudo', config.CONF.AGENT.root_helper)
        self.assertEqual('127.0.0.1', config.CONF.OFC.host)
        self.assertEqual('8888', config.CONF.OFC.port)
        # Check path_prefix is an empty string explicitly.
        self.assertEqual('', config.CONF.OFC.path_prefix)
        self.assertEqual('trema', config.CONF.OFC.driver)
        self.assertTrue(config.CONF.OFC.enable_packet_filter)
        self.assertFalse(config.CONF.OFC.use_ssl)
        self.assertIsNone(config.CONF.OFC.key_file)
        self.assertIsNone(config.CONF.OFC.cert_file)

    def test_shortcuts(self):
        self.assertEqual(config.CONF.OVS.integration_bridge,
                         config.OVS.integration_bridge)
        self.assertEqual(config.CONF.AGENT.polling_interval,
                         config.AGENT.polling_interval)
        self.assertEqual(config.CONF.OFC.host, config.OFC.host)
```
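The defaults asserted above are the kind registered through oslo.config option declarations. A hedged sketch of how options like `OFC.host` and `OFC.port` are typically declared (modern `oslo_config` import path; the exact option list and types in the NEC plugin may differ):

```python
from oslo_config import cfg

# Hypothetical subset of the OFC option group, mirroring the tested defaults.
ofc_opts = [
    cfg.StrOpt('host', default='127.0.0.1', help='OpenFlow controller host'),
    cfg.StrOpt('port', default='8888', help='OpenFlow controller port'),
    cfg.StrOpt('driver', default='trema', help='OFC driver name'),
]
cfg.CONF.register_opts(ofc_opts, group='OFC')
print(cfg.CONF.OFC.host)  # 127.0.0.1
```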
**repo:** isb-cgc/ISB-CGC-Webapp | **ref:** refs/heads/master | **path:** workbooks/migrations/0004_auto_20160614_1131.py | **copies:** 1
```python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-14 18:31
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('workbooks', '0003_auto_20160304_1518'),
    ]

    operations = [
        migrations.AlterField(
            model_name='workbook',
            name='last_date_saved',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='workbook_last_view',
            name='last_view',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='worksheet',
            name='last_date_saved',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='worksheet_plot_cohort',
            name='cohort',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='worksheet_plot_cohorts', to='workbooks.Worksheet_cohort'),
        ),
    ]
```
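Each `AlterField` above swaps in `auto_now=True`, which makes Django overwrite the timestamp on every `save()`. A hedged reconstruction of the corresponding model field (the real ISB-CGC model has many more fields):

```python
from django.db import models

class Workbook(models.Model):
    # auto_now=True: the field is set to the current time automatically on
    # every save(), which is what the AlterField operations migrate toward.
    last_date_saved = models.DateTimeField(auto_now=True)
```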
**repo:** jamescoineron/octane | **ref:** refs/heads/master | **path:** share/qt/make_spinner.py | **copies:** 4415
```python
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

SRC = 'img/reload_scaled.png'
DST = '../../src/qt/res/movies/update_spinner.mng'
TMPDIR = '/tmp'
TMPNAME = 'tmp-%03i.png'
NUMFRAMES = 35
FRAMERATE = 10.0
CONVERT = 'convert'
CLOCKWISE = True
DSIZE = (16, 16)

im_src = Image.open(SRC)

if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
```
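Each frame is rotated by the midpoint angle of its slice of the circle, so 35 frames cover the full 360 degrees without duplicating the start and end positions. A quick standalone check of the angle schedule (Python 3 here, unlike the Python 2 script above):

```python
NUMFRAMES = 35
angles = [(frame + 0.5) / NUMFRAMES * 360.0 for frame in range(NUMFRAMES)]
print(angles[0])              # ~5.14 degrees: first frame starts half a step in
print(angles[-1])             # ~354.86 degrees: last frame stops half a step short
print(angles[1] - angles[0])  # ~10.29 degrees between consecutive frames
```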
**repo:** datapythonista/pandas | **ref:** refs/heads/master | **path:** pandas/tests/tseries/holiday/test_observance.py | **copies:** 7
```python
from datetime import datetime

import pytest

from pandas.tseries.holiday import (
    after_nearest_workday,
    before_nearest_workday,
    nearest_workday,
    next_monday,
    next_monday_or_tuesday,
    next_workday,
    previous_friday,
    previous_workday,
    sunday_to_monday,
    weekend_to_monday,
)

_WEDNESDAY = datetime(2014, 4, 9)
_THURSDAY = datetime(2014, 4, 10)
_FRIDAY = datetime(2014, 4, 11)
_SATURDAY = datetime(2014, 4, 12)
_SUNDAY = datetime(2014, 4, 13)
_MONDAY = datetime(2014, 4, 14)
_TUESDAY = datetime(2014, 4, 15)
_NEXT_WEDNESDAY = datetime(2014, 4, 16)


@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_next_monday(day):
    assert next_monday(day) == _MONDAY


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_MONDAY, _TUESDAY)]
)
def test_next_monday_or_tuesday(day, expected):
    assert next_monday_or_tuesday(day) == expected


@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_previous_friday(day):
    assert previous_friday(day) == _FRIDAY


def test_sunday_to_monday():
    assert sunday_to_monday(_SUNDAY) == _MONDAY


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_nearest_workday(day, expected):
    assert nearest_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_weekend_to_monday(day, expected):
    assert weekend_to_monday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    [
        (_WEDNESDAY, _THURSDAY),
        (_THURSDAY, _FRIDAY),
        (_SATURDAY, _MONDAY),
        (_SUNDAY, _MONDAY),
        (_MONDAY, _TUESDAY),
        (_TUESDAY, _NEXT_WEDNESDAY),  # WED is same week as TUE
    ],
)
def test_next_workday(day, expected):
    assert next_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _FRIDAY), (_TUESDAY, _MONDAY)]
)
def test_previous_workday(day, expected):
    assert previous_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    [
        (_THURSDAY, _WEDNESDAY),
        (_FRIDAY, _THURSDAY),
        (_SATURDAY, _THURSDAY),
        (_SUNDAY, _FRIDAY),
        (_MONDAY, _FRIDAY),  # last week Friday
        (_TUESDAY, _MONDAY),
        (_NEXT_WEDNESDAY, _TUESDAY),  # WED is same week as TUE
    ],
)
def test_before_nearest_workday(day, expected):
    assert before_nearest_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_FRIDAY, _MONDAY)]
)
def test_after_nearest_workday(day, expected):
    assert after_nearest_workday(day) == expected
```
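The parametrized cases pin down the observance contract: Saturday rolls back to Friday, Sunday forward to Monday, and weekdays stay put. A minimal self-contained sketch of a function with that behavior (the real implementation lives in `pandas.tseries.holiday`; this is just an illustration of the rule the tests encode):

```python
from datetime import datetime, timedelta

def nearest_workday_sketch(dt):
    """Saturday rolls back to Friday, Sunday forward to Monday."""
    if dt.weekday() == 5:          # Saturday
        return dt - timedelta(days=1)
    if dt.weekday() == 6:          # Sunday
        return dt + timedelta(days=1)
    return dt                      # weekdays are already workdays

assert nearest_workday_sketch(datetime(2014, 4, 12)) == datetime(2014, 4, 11)
assert nearest_workday_sketch(datetime(2014, 4, 13)) == datetime(2014, 4, 14)
assert nearest_workday_sketch(datetime(2014, 4, 14)) == datetime(2014, 4, 14)
```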
**repo:** agermanidis/Pattern | **ref:** refs/heads/master | **path:** en/inflect/__init__.py | **copies:** 1
```python
#### PATTERN | EN | INFLECT ##########################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
######################################################################################################
# A set of rule-based tools for word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - comparatives and superlatives of adjectives.

import re
import os

try:
    MODULE = os.path.dirname(__file__)
except:
    MODULE = ""

VERB = "VB"
NOUN = "NN"
ADJECTIVE = "JJ"
ADVERB = "RB"

#### ARTICLE #########################################################################################
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English

article_rules = [
    ["euler|hour(?!i)|heir|honest|hono", "an"],  # exceptions: an hour, an honor
    # Abbreviations:
    # strings of capitals starting with a vowel-sound consonant followed by another consonant,
    # which are not likely to be real words.
    ["(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]", "an"],
    ["^[aefhilmnorsx][.-]", "an"],
    ["^[a-z][.-]", "a"],
    ["^[^aeiouy]", "a"],                                # consonants: a bear
    ["^e[uw]", "a"],                                    # eu like "you": a european
    ["^onc?e", "a"],                                    # o like "wa": a one-liner
    ["uni([^nmd]|mo)", "a"],                            # u like "you": a university
    ["^u[bcfhjkqrst][aeiou]", "a"],                     # u like "you": a uterus
    ["^[aeiou]", "an"],                                 # vowels: an owl
    ["y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)", "an"],   # y like "i": an yclept, a year
    ["", "a"]                                           # guess "a"
]

# Compile the regular expressions.
for p in article_rules:
    p[0] = re.compile(p[0])

def definite_article(word):
    return "the"

def indefinite_article(word):
    """ Returns the indefinite article for a given word.
        For example: university => a university.
    """
    word = word.split(" ")[0]
    for rule, article in article_rules:
        if rule.search(word) is not None:
            return article

DEFINITE = "definite"
INDEFINITE = "indefinite"

def article(word, function=INDEFINITE):
    """ Returns the indefinite (a/an) or definite (the) article for the given word.
    """
    return function == DEFINITE and definite_article(word) or indefinite_article(word)

_article = article

def referenced(word, article=INDEFINITE):
    """ Returns a string with the article + the word.
    """
    return "%s %s" % (_article(word, article), word)

#print referenced("hour")
#print referenced("FBI")
#print referenced("bear")
#print referenced("one-liner")
#print referenced("european")
#print referenced("university")
#print referenced("uterus")
#print referenced("owl")
#print referenced("yclept")
#print referenced("year")
#### PLURALIZE ########################################################################################
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html

# Prepositions are used to solve things like
# "mother-in-law" or "man at arms".
plural_prepositions = [
    "about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind",
    "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during",
    "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over",
    "since", "till", "to", "under", "until", "unto", "upon", "with"
]

# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
    # 0) Indefinite articles and demonstratives.
    [["^a$|^an$", "some", None, False],
     ["^this$", "these", None, False],
     ["^that$", "those", None, False],
     ["^any$", "all", None, False]
    ],
    # 1) Possessive adjectives.
    # Overlaps with 2) for "his" and "its".
    # Overlaps with 3) for "her".
    [["^my$", "our", None, False],
     ["^your$|^thy$", "your", None, False],
     ["^her$|^his$|^its$|^their$", "their", None, False]
    ],
    # 2) Possessive pronouns.
    [["^mine$", "ours", None, False],
     ["^yours$|^thine$", "yours", None, False],
     ["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
    ],
    # 3) Personal pronouns.
    [["^I$", "we", None, False],
     ["^me$", "us", None, False],
     ["^myself$", "ourselves", None, False],
     ["^you$", "you", None, False],
     ["^thou$|^thee$", "ye", None, False],
     ["^yourself$|^thyself$", "yourself", None, False],
     ["^she$|^he$|^it$|^they$", "they", None, False],
     ["^her$|^him$|^it$|^them$", "them", None, False],
     ["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
     ["^oneself$", "oneselves", None, False]
    ],
    # 4) Words that do not inflect.
    [["$", "", "uninflected", False],
     ["$", "", "uncountable", False],
     ["s$", "s", "s-singular", False],
     ["fish$", "fish", None, False],
     ["([- ])bass$", "\\1bass", None, False],
     ["ois$", "ois", None, False],
     ["sheep$", "sheep", None, False],
     ["deer$", "deer", None, False],
     ["pox$", "pox", None, False],
     ["([A-Z].*)ese$", "\\1ese", None, False],
     ["itis$", "itis", None, False],
     ["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
    ],
    # 5) Irregular plurals (mongoose, oxen).
    [["atlas$", "atlantes", None, True],
     ["atlas$", "atlases", None, False],
     ["beef$", "beeves", None, True],
     ["brother$", "brethren", None, True],
     ["child$", "children", None, False],
     ["corpus$", "corpora", None, True],
     ["corpus$", "corpuses", None, False],
     ["^cow$", "kine", None, True],
     ["ephemeris$", "ephemerides", None, False],
     ["ganglion$", "ganglia", None, True],
     ["genie$", "genii", None, True],
     ["genus$", "genera", None, False],
     ["graffito$", "graffiti", None, False],
     ["loaf$", "loaves", None, False],
     ["money$", "monies", None, True],
     ["mongoose$", "mongooses", None, False],
     ["mythos$", "mythoi", None, False],
     ["octopus$", "octopodes", None, True],
     ["opus$", "opera", None, True],
     ["opus$", "opuses", None, False],
     ["^ox$", "oxen", None, False],
     ["penis$", "penes", None, True],
     ["penis$", "penises", None, False],
     ["soliloquy$", "soliloquies", None, False],
     ["testis$", "testes", None, False],
     ["trilby$", "trilbys", None, False],
     ["turf$", "turves", None, True],
     ["numen$", "numena", None, False],
     ["occiput$", "occipita", None, True]
    ],
    # 6) Irregular inflections for common suffixes (synopses, mice, men).
    [["man$", "men", None, False],
     ["person$", "people", None, False],
     ["([lm])ouse$", "\\1ice", None, False],
     ["tooth$", "teeth", None, False],
     ["goose$", "geese", None, False],
     ["foot$", "feet", None, False],
     ["zoon$", "zoa", None, False],
     ["([csx])is$", "\\1es", None, False]
    ],
    # 7) Fully assimilated classical inflections (vertebrae, codices).
    [["ex$", "ices", "ex-ices", False],
     ["ex$", "ices", "ex-ices-classical", True],
     ["um$", "a", "um-a", False],
     ["um$", "a", "um-a-classical", True],
     ["on$", "a", "on-a", False],
     ["a$", "ae", "a-ae", False],
     ["a$", "ae", "a-ae-classical", True]
    ],
    # 8) Classical variants of modern inflections (stigmata, soprani).
    [["trix$", "trices", None, True],
     ["eau$", "eaux", None, True],
     ["ieu$", "ieu", None, True],
     ["([iay])nx$", "\\1nges", None, True],
     ["en$", "ina", "en-ina-classical", True],
     ["a$", "ata", "a-ata-classical", True],
     ["is$", "ides", "is-ides-classical", True],
     ["us$", "i", "us-i-classical", True],
     ["us$", "us", "us-us-classical", True],
     ["o$", "i", "o-i-classical", True],
     ["$", "i", "-i-classical", True],
     ["$", "im", "-im-classical", True]
    ],
    # 9) -ch, -sh and -ss take -es in the plural (churches, classes).
    [["([cs])h$", "\\1hes", None, False],
     ["ss$", "sses", None, False],
     ["x$", "xes", None, False]
    ],
    # 10) Certain words ending in -f or -fe take -ves in the plural (lives, wolves).
    [["([aeo]l)f$", "\\1ves", None, False],
     ["([^d]ea)f$", "\\1ves", None, False],
     ["arf$", "arves", None, False],
     ["([nlw]i)fe$", "\\1ves", None, False],
    ],
    # 11) -y takes -ys if preceded by a vowel or when a proper noun,
    # but -ies if preceded by a consonant (storeys, Marys, stories).
    [["([aeiou])y$", "\\1ys", None, False],
     ["([A-Z].*)y$", "\\1ys", None, False],
     ["y$", "ies", None, False]
    ],
    # 12) Some words ending in -o take -os, the rest take -oes.
    # Words in which the -o is preceded by a vowel always take -os (lassos, potatoes, bamboos).
    [["o$", "os", "o-os", False],
     ["([aeiou])o$", "\\1os", None, False],
     ["o$", "oes", None, False]
    ],
    # 13) Military stuff (Major Generals).
    [["l$", "ls", "general-generals", False]
    ],
    # 14) Otherwise, assume that the plural just adds -s (cats, programmes).
    [["$", "s", None, False]
    ],
]

# For performance, compile the regular expressions only once:
for ruleset in plural_rules:
    for rule in ruleset:
        rule[0] = re.compile(rule[0])

# Suffix categories.
plural_categories = {
    "uninflected": [
        "bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps",
        "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti",
        "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel",
        "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon",
        "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
    "uncountable": [
        "advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture",
        "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage",
        "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice",
        "sand", "software", "understanding", "water"],
    "s-singular": [
        "acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas",
        "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis",
        "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros",
        "sassafras", "trellis"],
    "ex-ices": ["codex", "murex", "silex"],
    "ex-ices-classical": [
        "apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
    "um-a": [
        "agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum",
        "ovum", "stratum"],
    "um-a-classical": [
        "aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium",
        "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium",
        "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum",
        "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
    "on-a": [
        "aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion",
        "phenomenon", "prolegomenon"],
    "a-ae": ["alga", "alumna", "vertebra"],
    "a-ae-classical": [
        "abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna",
        "medusa", "nebula", "nova", "parabola"],
    "en-ina-classical": ["foramen", "lumen", "stamen"],
    "a-ata-classical": [
        "anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema",
        "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma",
        "schema", "soma", "stigma", "stoma", "trauma"],
    "is-ides-classical": ["clitoris", "iris"],
    "us-i-classical": [
        "focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus",
        "torus", "umbilicus", "uterus"],
    "us-us-classical": [
        "apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus",
        "sinus", "status"],
    "o-i-classical": ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
    "-i-classical": ["afreet", "afrit", "efreet"],
    "-im-classical": ["cherub", "goy", "seraph"],
    "o-os": [
        "albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco",
        "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto",
        "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
    "general-generals": [
        "Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
        "adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}

def pluralize(word, pos=NOUN, custom={}, classical=True):
    """ Returns the plural of a given word.
        For example: child -> children.
        Handles nouns and adjectives, using classical inflection by default
        (e.g. where "matrix" pluralizes to "matrices" instead of "matrixes").
        The custom dictionary is for user-defined replacements.
    """
    if word in custom.keys():
        return custom[word]
    # Recursion of genitives.
    # Remove the apostrophe and any trailing -s,
    # form the plural of the resultant noun, and then append an apostrophe (dog's -> dogs').
    if word.endswith("'") or word.endswith("'s"):
        owner = word.rstrip("'s")
        owners = pluralize(owner, pos, custom, classical)
        if owners.endswith("s"):
            return owners + "'"
        else:
            return owners + "'s"
    # Recursion of compound words
    # (Postmasters General, mothers-in-law, Roman deities).
    words = word.replace("-", " ").split(" ")
    if len(words) > 1:
        if words[1] in ("general", "General") and \
           words[0] not in plural_categories["general-generals"]:
            return word.replace(words[0], pluralize(words[0], pos, custom, classical))
        elif words[1] in plural_prepositions:
            return word.replace(words[0], pluralize(words[0], pos, custom, classical))
        else:
            return word.replace(words[-1], pluralize(words[-1], pos, custom, classical))
    # Only a very small number of adjectives inflect.
    n = range(len(plural_rules))
    if pos.startswith(ADJECTIVE):
        n = [0, 1]
    # Apply pluralization rules.
    for i in n:
        ruleset = plural_rules[i]
        for rule in ruleset:
            suffix, inflection, category, classic = rule
            # A general rule, or a classic rule in classical mode.
            if category is None:
                if not classic or (classic and classical):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
            # A rule relating to a specific category of words.
            if category is not None:
                if word in plural_categories[category] and (not classic or (classic and classical)):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
    return word

#print pluralize("part-of-speech")
#print pluralize("child")
#print pluralize("dog's")
#print pluralize("wolf")
#print pluralize("bear")
#print pluralize("kitchen knife")
#print pluralize("octopus", classical=True)
#print pluralize("matrix", classical=True)
#print pluralize("matrix", classical=False)
#print pluralize("my", pos=ADJECTIVE)
#### SINGULARIZE ######################################################################################
# Adapted from Bermi Ferrer's Inflector for Python:
# http://www.bermi.org/inflector/

# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.

singular_rules = [
    ['(?i)(.)ae$', '\\1a'],
    ['(?i)(.)itis$', '\\1itis'],
    ['(?i)(.)eaux$', '\\1eau'],
    ['(?i)(quiz)zes$', '\\1'],
    ['(?i)(matr)ices$', '\\1ix'],
    ['(?i)(ap|vert|ind)ices$', '\\1ex'],
    ['(?i)^(ox)en', '\\1'],
    ['(?i)(alias|status)es$', '\\1'],
    ['(?i)(octop|vir)i$', '\\1us'],   # was "([octop|vir])i$": a character class, not an alternation
    ['(?i)(cris|ax|test)es$', '\\1is'],
    ['(?i)(shoe)s$', '\\1'],
    ['(?i)(o)es$', '\\1'],
    ['(?i)(bus)es$', '\\1'],
    ['(?i)([ml])ice$', '\\1ouse'],    # was "([m|l])ice$": "|" does not belong inside a class
    ['(?i)(x|ch|ss|sh)es$', '\\1'],
    ['(?i)(m)ovies$', '\\1ovie'],
    ['(?i)(z)ombies$', '\\1ombie'],   # was "ombies$" with no group for the \1 backreference
    ['(?i)(s)eries$', '\\1eries'],
    ['(?i)([^aeiouy]|qu)ies$', '\\1y'],
    # Certain words ending in -f or -fe take -ves in the plural (lives, wolves).
    ["([aeo]l)ves$", "\\1f"],
    ["([^d]ea)ves$", "\\1f"],
    ["arves$", "arf"],
    ["erves$", "erve"],
    ["([nlw]i)ves$", "\\1fe"],
    ['(?i)([lr])ves$', '\\1f'],
    ["([aeo])ves$", "\\1ve"],
    ['(?i)(sive)s$', '\\1'],
    ['(?i)(tive)s$', '\\1'],
    ['(?i)(hive)s$', '\\1'],
    ['(?i)([^f])ves$', '\\1fe'],
    # -es suffix.
    ['(?i)(^analy)ses$', '\\1sis'],
    ['(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'],
    ['(?i)(.)opses$', '\\1opsis'],
    ['(?i)(.)yses$', '\\1ysis'],
    ['(?i)(h|d|r|o|n|b|cl|p)oses$', '\\1ose'],
    ['(?i)(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$', '\\1ose'],
    ['(?i)(.)oses$', '\\1osis'],
    # -a
    ['(?i)([ti])a$', '\\1um'],
    ['(?i)(n)ews$', '\\1ews'],
    ['(?i)s$', ''],
]

# For performance, compile the regular expressions only once:
for rule in singular_rules:
    rule[0] = re.compile(rule[0])

singular_uninflected = [
    "bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps",
    "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti",
    "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel",
    "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon",
    "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"
]
singular_uncountable = [
    "advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture",
    "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage",
    "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand",
    "software", "understanding", "water"
]
singular_ie = [
    "algerie", "auntie", "beanie", "birdie", "bogie", "bombie", "bookie", "collie", "cookie", "cutie",
    "doggie", "eyrie", "freebie", "goonie", "groupie", "hankie", "hippie", "hoagie", "hottie",
    "indie", "junkie", "laddie", "laramie", "lingerie", "meanie", "nightie", "oldie", "^pie",
    "pixie", "quickie", "reverie", "rookie", "softie", "sortie", "stoolie", "sweetie", "techie",
    "^tie", "toughie", "valkyrie", "veggie", "weenie", "yuppie", "zombie"
]

singular_irregular = {
    "men": "man",
    "people": "person",
    "children": "child",
    "sexes": "sex",
    "axes": "axe",
    "moves": "move",
    "teeth": "tooth",
    "geese": "goose",
    "feet": "foot",
    "zoa": "zoon",
    "atlantes": "atlas",
    "atlases": "atlas",
    "beeves": "beef",
    "brethren": "brother",
    "corpora": "corpus",
    "corpuses": "corpus",
    "kine": "cow",
    "ephemerides": "ephemeris",
    "ganglia": "ganglion",
    "genii": "genie",
    "genera": "genus",
    "graffiti": "graffito",
    "helves": "helve",
    "leaves": "leaf",
    "loaves": "loaf",
    "monies": "money",
    "mongooses": "mongoose",
    "mythoi": "mythos",
    "octopodes": "octopus",
    "opera": "opus",
    "opuses": "opus",
    "oxen": "ox",
    "penes": "penis",
    "penises": "penis",
    "soliloquies": "soliloquy",
    "testes": "testis",
    "trilbys": "trilby",
    "turves": "turf",
    "numena": "numen",
    "occipita": "occiput",
    "our": "my",
}

def singularize(word, pos=NOUN, custom={}):
    if word in custom.keys():
        return custom[word]
    # Recursion of compound words (e.g. mothers-in-law).
    if "-" in word:
        words = word.split("-")
        if len(words) > 1 and words[1] in plural_prepositions:
            return singularize(words[0], pos, custom) + "-" + "-".join(words[1:])
    # dogs' => dog's
    if word.endswith("'"):
        return singularize(word[:-1]) + "'s"
    lower = word.lower()
    for w in singular_uninflected:
        if w.endswith(lower):
            return word
    for w in singular_uncountable:
        if w.endswith(lower):
            return word
    for w in singular_ie:
        if lower.endswith(w + "s"):
            return w
    for w in singular_irregular.keys():
        if lower.endswith(w):
            return re.sub('(?i)' + w + '$', singular_irregular[w], word)
    for rule in singular_rules:
        suffix, inflection = rule
        match = suffix.search(word)
        if match:
            groups = match.groups()
            for k in range(0, len(groups)):
                if groups[k] is None:
                    inflection = inflection.replace('\\' + str(k + 1), '')
            return suffix.sub(inflection, word)
    return word
#### VERB CONJUGATION #################################################################################
# Each verb has morphs for infinitive, 3rd singular present, present participle, past and past participle.
# Verbs like "be" have other morphs as well (i.e. I am, you are, she is, they aren't).
# The following verbs can be negated: be, can, do, will, must, have, may, need, dare, ought.

_verb_tenses = None
_verb_lemmas = None

def _load_verbs():
    # The data is lazily loaded when base() is called the first time.
    # The verb.txt morphology is adopted from the XTAG morph_english.flat:
    # http://www.cis.upenn.edu/~xtag/
    global _verb_tenses
    global _verb_lemmas
    _verb_tenses = {}
    _verb_lemmas = {}
    path = os.path.join(MODULE, "verbs.txt")
    data = open(path).readlines()
    for i in range(len(data)):
        if not data[i].startswith(";;;"):
            tenses = data[i].strip().split(",")
            base = tenses[0]
            _verb_tenses[base] = tenses
            for x in tenses:
                if x != "":
                    _verb_lemmas[x] = base
    del data

BASE = INFINITIVE = INF = VB = "infinitive"
PRESENT_1ST_PERSON_SINGULAR = VBP = "present 1st person singular"
PRESENT_2ND_PERSON_SINGULAR = "present 2nd person singular"
PRESENT_3RD_PERSON_SINGULAR = VBZ = "present 3rd person singular"
PRESENT_PLURAL = "present plural"
PRESENT_PARTICIPLE = PROGRESSIVE = GERUND = VBG = "present participle"
PAST = VBD = "past"
PAST_1ST_PERSON_SINGULAR = "past 1st person singular"
PAST_2ND_PERSON_SINGULAR = "past 2nd person singular"
PAST_3RD_PERSON_SINGULAR = "past 3rd person singular"
PAST_PLURAL = "past plural"
PAST_PARTICIPLE = VBN = "past participle"

_verb_tenses_keys = {
    INFINITIVE: 0,
    PRESENT_1ST_PERSON_SINGULAR: 1,
    PRESENT_2ND_PERSON_SINGULAR: 2,
    PRESENT_3RD_PERSON_SINGULAR: 3,
    PRESENT_PLURAL: 4,
    PRESENT_PARTICIPLE: 5,
    PAST_1ST_PERSON_SINGULAR: 6,
    PAST_2ND_PERSON_SINGULAR: 7,
    PAST_3RD_PERSON_SINGULAR: 8,
    PAST_PLURAL: 9,
    PAST: 10,
    PAST_PARTICIPLE: 11
}

_verb_tenses_aliases = (
    (INFINITIVE, ["inf", "VB"]),
    (PRESENT_1ST_PERSON_SINGULAR, ["1sg", "VBP"]),
    (PRESENT_2ND_PERSON_SINGULAR, ["2sg"]),
    (PRESENT_3RD_PERSON_SINGULAR, ["3sg", "VBZ"]),
    (PRESENT_PLURAL, ["pl", "plural"]),
    (PRESENT_PARTICIPLE, ["part", "prog", "progressive", "gerund", "VBG"]),
    (PAST, ["p", "VBD"]),
    (PAST_1ST_PERSON_SINGULAR, ["1sgp"]),
    (PAST_2ND_PERSON_SINGULAR, ["2sgp"]),
    (PAST_3RD_PERSON_SINGULAR, ["3sgp"]),
    (PAST_PLURAL, ["ppl", "pplural"]),
    (PAST_PARTICIPLE, ["ppart", "VBN"]),
)

# Reform the aliases to a (alias, tense)-dictionary.
a = {}
for key, values in _verb_tenses_aliases:
    for v in values:
        a[v] = key
_verb_tenses_aliases = a

def base(verb, parse=False):
    if _verb_tenses is None:
        _load_verbs()
    if verb.lower() in _verb_lemmas:
        return _verb_lemmas[verb.lower()]
    if verb in _verb_lemmas:
        return _verb_lemmas[verb]
    if parse is True:
        # Rule-based approach.
        return _parse_lemma(verb)

infinitive = lemma = base

def conjugate(verb, tense=INFINITIVE, negated=False, parse=False):
    """ Inflects the verb and returns the given tense (or None).
        For example: be
        - present 1sg/2sg/3sg/pl => I am, you are, she is, we are
        - present participle => being,
        - past => I was, you were, he was, we were
        - past participle => been,
        - negated present => I am not, you aren't, it isn't.
    """
    t = _verb_tenses_aliases.get(tense, tense)  # Disambiguate aliases like "pl" => "present plural".
    i = _verb_tenses_keys.get(t)                # Get the associated tense index.
    b = base(verb)
    if negated is True:
        i += len(_verb_tenses_keys)  # Negated forms are at the end of the list of tenses.
    if b is not None:
        x = _verb_tenses[b][i]
        if x == "":
            if 0 < i <= 5:
                return b                    # No morph for 1sg/2sg/3sg => return base form.
            if 5 < i <= 9:
                return _verb_tenses[b][10]  # No morph for 1sgp/2sgp/3sgp => return past form.
        if x != "":
            return x
    if parse is True:
        # Rule-based approach.
        return _parse_lexeme(_parse_lemma(verb))[i]

def conjugations(verb, parse=False):
    """ Returns all possible inflections of the given verb.
    """
    b = base(verb)
    if b is not None:
        a = [x for x in _verb_tenses.get(b, []) if x != ""]
        u = []
        [u.append(x) for x in a if x not in u]  # Deduplicate, preserving order.
        return u
    if parse is True:
        # Rule-based approach.
        a = _parse_lexeme(_parse_lemma(verb))
        return [a[0], a[3], a[5], a[6]]

lexeme = conjugations

class Tenses(list):
    def __contains__(self, tense):
        # t in tenses(verb) also works when t is an alias (e.g. "1sg").
        return list.__contains__(self, _verb_tenses_aliases.get(tense, tense))

def tenses(verb, parse=True):
    """ Returns a list of tenses for the given verb inflection.
    """
    verb = verb.lower()
    b = base(verb)
    a = []
    if b in _verb_tenses:
        for tense, i in _verb_tenses_keys.items():
            t = _verb_tenses[b]
            if t[i] == verb \
            or t[i + len(_verb_tenses_keys)] == verb \
            or t[i] == "" and 0 < i <= 5 and verb == b \
            or t[i] == "" and 5 < i <= 9 and verb == t[10]:
                a.append(tense)
    if parse is True:
        # Rule-based approach.
        v = _parse_lexeme(_parse_lemma(verb))
        [a.append(tense) for tense, i in _verb_tenses_keys.items() if v[i] == verb]
    return Tenses(sorted(a))
#--- RULE-BASED VERB CONJUGATION ----------------------------------------------------------------------

VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)

def _parse_lexeme(verb):
    """ For a regular verb (base form), returns the forms using a rule-based approach.
    """
    v = verb.lower()
    if len(v) > 1 and v.endswith("e") and v[-2] not in VOWELS:
        # Verbs ending in a consonant followed by "e": dance, save, devote, evolve.
        return [v, v, v, v+"s", v, v[:-1]+"ing"] + [v+"d"]*6
    if len(v) > 1 and v.endswith("y") and v[-2] not in VOWELS:
        # Verbs ending in a consonant followed by "y": comply, copy, magnify.
        return [v, v, v, v[:-1]+"ies", v, v+"ing"] + [v[:-1]+"ied"]*6
    if v.endswith(("ss", "sh", "ch", "x")):
        # Verbs ending in sibilants: kiss, bless, box, polish, preach.
        return [v, v, v, v+"es", v, v+"ing"] + [v+"ed"]*6
    if v.endswith("ic"):
        # Verbs ending in -ic: panic, mimic.
        return [v, v, v, v+"es", v, v+"king"] + [v+"ked"]*6
    if len(v) > 1 and v[-1] not in VOWELS and v[-2] not in VOWELS:
        # Verbs ending in a consonant cluster: delight, clamp.
        return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
    if (len(v) > 1 and v.endswith(("y", "w")) and v[-2] in VOWELS) \
    or (len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] in VOWELS) \
    or (len(v) > 3 and v[-1] not in VOWELS and v[-3] in VOWELS and v[-4] in VOWELS):
        # Verbs ending in a long vowel or diphthong followed by a consonant: paint, devour, play.
        return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
    if len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] not in VOWELS:
        # Verbs ending in a short vowel followed by a consonant: chat, chop, or compel.
        return [v, v, v, v+"s", v, v+v[-1]+"ing"] + [v+v[-1]+"ed"]*6
    return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6

def _parse_lemma(verb):
    """ Returns the base form of the given inflected verb, using a rule-based approach.
        This is problematic if a verb ending in -e is given in the past tense or gerund
        (error rate is about 12/100 in this case).
    """
    v = verb.lower()
    b = False
    if v.endswith("s"):
        if v.endswith("ies") and len(v) > 3 and v[-4] not in VOWELS:
            return v[:-3]+"y"  # complies => comply
        if v.endswith(("sses", "shes", "ches", "xes")):
            return v[:-2]      # kisses => kiss
    if v.endswith("ied") and re_vowel.search(v[:-3]) is not None:
        return v[:-3]+"y"      # envied => envy
    if v.endswith("ing") and re_vowel.search(v[:-3]) is not None:
        v = v[:-3]; b = True   # chopping => chopp
    if v.endswith("ed") and re_vowel.search(v[:-2]) is not None:
        v = v[:-2]; b = True   # danced => danc
    if b:
        # Doubled consonant after short vowel: chopp => chop.
        if len(v) > 3 and v[-1] == v[-2] and v[-3] in VOWELS and v[-4] not in VOWELS and not v.endswith("ss"):
            return v[:-1]
        if v.endswith(("ick", "ack")):
            return v[:-1]      # panick => panic
        # Guess common cases where the base form ends in -e:
        if v.endswith(("v", "z", "c", "i")):
            return v+"e"       # danc => dance
        if v.endswith("g") and v.endswith(("dg", "lg", "ng", "rg")):
            return v+"e"       # indulg => indulge
        if v.endswith(("b", "d", "g", "k", "l", "m", "r", "s", "t")) \
        and len(v) > 2 and v[-2] in VOWELS and not v[-3] in VOWELS \
        and not v.endswith("er"):
            return v+"e"       # generat => generate
        if v.endswith("n") and v.endswith(("an", "in")) and not v.endswith(("ain", "oin", "oan")):
            return v+"e"       # imagin => imagine
        if v.endswith("l") and len(v) > 1 and v[-2] not in VOWELS:
            return v+"e"       # squabbl => squabble
        if v.endswith("f") and len(v) > 2 and v[-2] in VOWELS and v[-3] not in VOWELS:
            return v+"e"       # chaf => chafe
        if v.endswith("e"):
            return v+"e"       # decre => decree
        if v.endswith(("th", "ang", "un", "cr", "vr", "rs", "ps", "tr")):
            return v+"e"
    return v
#### COMPARATIVE & SUPERLATIVE ########################################################################
VOWELS = "aeiouy"
grade_irregular = {
"bad" : ("worse", "worst"),
"far" : ("further", "farthest"),
"good" : ("better", "best"),
"hind" : ("hinder", "hindmost"),
"ill" : ("worse", "worst"),
"less" : ("lesser", "least"),
"little" : ("less", "least"),
"many" : ("more", "most"),
"much" : ("more", "most"),
"well" : ("better", "best")
}
grade_uninflected = ["giant", "glib", "hurt", "known", "madly"]
COMPARATIVE = "er"
SUPERLATIVE = "est"
def _count_syllables(word):
""" Returns the estimated number of syllables in the word by counting vowel-groups.
"""
n = 0
p = False # True if the previous character was a vowel.
for ch in word:
v = ch in VOWELS
n += int(v and not p)
p = v
return n
def grade(adjective, suffix=COMPARATIVE):
""" Returns the comparative or superlative form of the given adjective.
"""
n = _count_syllables(adjective)
if adjective in grade_irregular:
# A number of adjectives inflect irregularly.
return grade_irregular[adjective][suffix!=COMPARATIVE]
elif adjective in grade_uninflected:
# A number of adjectives don't inflect at all.
return "%s %s" % (suffix==COMPARATIVE and "more" or "most", adjective)
elif n <= 2 and adjective.endswith("e"):
# With one syllable and ending with an e: larger, wiser.
suffix = suffix.lstrip("e")
elif n == 1 and len(adjective) >= 3 \
and adjective[-1] not in VOWELS and adjective[-2] in VOWELS and adjective[-3] not in VOWELS:
# With one syllable ending with consonant-vowel-consonant: bigger, thinner.
if not adjective.endswith(("w")): # Exceptions: lower, newer.
suffix = adjective[-1] + suffix
elif n == 1:
# With one syllable ending with more consonants or vowels: briefer.
pass
elif n == 2 and adjective.endswith("y"):
# With two syllables ending with a y: funnier, hairier.
adjective = adjective[:-1] + "i"
elif n == 2 and adjective[-2:] in ("er", "le", "ow"):
# With two syllables and specific suffixes: gentler, narrower.
pass
else:
# With three or more syllables: more generous, more important.
return "%s %s" % (suffix==COMPARATIVE and "more" or "most", adjective)
return adjective + suffix
def comparative(adjective):
return grade(adjective, COMPARATIVE)
def superlative(adjective):
return grade(adjective, SUPERLATIVE)
|
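Taken together, the module exposes article choice, pluralization/singularization, and adjective grading. A short hedged tour (Python 2, matching the print-statement era of the code; the `en.inflect` import path follows the package layout in the record header):

```python
from en.inflect import referenced, pluralize, singularize, comparative, superlative

print referenced("hour")                    # "an hour"   (article exception rule)
print pluralize("child")                    # "children"  (irregular plural)
print pluralize("octopus", classical=True)  # "octopodes" (classical mode)
print singularize("children")               # "child"
print comparative("big")                    # "bigger"    (consonant doubling)
print superlative("happy")                  # "happiest"  (y -> i)
```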
**repo:** 2ndy/RaspIM | **ref:** refs/heads/master | **path:** usr/lib/python2.6/distutils/version.py | **copies:** 259
```python
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#

"""Provides classes to represent module version numbers (one class for
each style of version numbering).  There are currently two such classes
implemented: StrictVersion and LooseVersion.

Every version number class implements the following interface:
  * the 'parse' method takes a string and parses it to some internal
    representation; if the string is an invalid version number,
    'parse' raises a ValueError exception
  * the class constructor takes an optional string argument which,
    if supplied, is passed to 'parse'
  * __str__ reconstructs the string that was passed to 'parse' (or
    an equivalent string -- ie. one that will generate an equivalent
    version number instance)
  * __repr__ generates Python code to recreate the version number instance
  * __cmp__ compares the current instance with either another instance
    of the same class or a string (which will be parsed to an instance
    of the same class, thus must follow the same rules)
"""

import string, re
from types import StringType


class Version:
    """Abstract base class for version numbering classes.  Just provides
    constructor (__init__) and reproducer (__repr__), because those
    seem to be the same for all version numbering classes.
    """

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)

    def __repr__ (self):
        return "%s ('%s')" % (self.__class__.__name__, str(self))


# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
#    __init__ (string) - create and take same action as 'parse'
#                        (string parameter is optional)
#    parse (string)    - convert a string representation to whatever
#                        internal representation is appropriate for
#                        this style of version numbering
#    __str__ (self)    - convert back to a string; should be very similar
#                        (if not identical to) the string supplied to parse
#    __repr__ (self)   - generate Python code to recreate the instance
#    __cmp__ (self, other) - compare two version numbers ('other' may
#                        be an unparsed version string, or another
#                        instance of your version class)


class StrictVersion (Version):
    """Version numbering for anal retentives and software idealists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)

    def parse (self, vstring):
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            self.version = tuple(map(string.atoi, [major, minor]) + [0])

        if prerelease:
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None

    def __str__ (self):
        if self.version[2] == 0:
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def __cmp__ (self, other):
        if isinstance(other, StringType):
            other = StrictVersion(other)

        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease
            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!
            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)
        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter

# end class StrictVersion


# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
#    sequences of letters. If only periods, then these are compared
#    left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
#    compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison.  This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave.  There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
#   - indicating a "pre-release" version
#     ('alpha', 'beta', 'a', 'b', 'pre', 'p')
#   - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number.  The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison.  This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct.  Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind.  The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above.  This works great as long
# as everyone can go along with bondage and discipline.  Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination.  The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings.  This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++".  I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2".  That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers.  It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception).  But I'd rather
# have a conception that matches common notions about version numbers.

class LooseVersion (Version):
    """Version numbering for anarchists and software realists.
    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        for i in range(len(components)):
            try:
                components[i] = int(components[i])
            except ValueError:
                pass

        self.version = components

    def __str__ (self):
        return self.vstring

    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)

    def __cmp__ (self, other):
        if isinstance(other, StringType):
            other = LooseVersion(other)

        return cmp(self.version, other.version)

# end class LooseVersion
```
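A hedged usage sketch under Python 2 (the `__cmp__`-based protocol above only works there); the outcomes follow directly from the comparison rules documented in the file:

```python
from distutils.version import StrictVersion, LooseVersion

# Pre-release tags sort before the plain release with equal numeric parts:
print StrictVersion("1.0.4a3") < StrictVersion("1.0.4")  # True
# LooseVersion's tuple/lexical comparison handles post-release suffixes well:
print LooseVersion("0.99") < LooseVersion("0.99pl14")    # True
print LooseVersion("0.99pl14") < LooseVersion("1.0")     # True
# ...but mis-orders pre-release letters, as the comments above concede:
print LooseVersion("1.5.2a2") < LooseVersion("1.5.2")    # False
```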
itsjeyd/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/tests/test_course_settings.py
|
11
|
"""
Tests for Studio Course Settings.
"""
import datetime
import ddt
import json
import copy
import mock
from mock import Mock, patch
import unittest
from django.conf import settings
from django.utils.timezone import UTC
from django.test.utils import override_settings
from contentstore.utils import reverse_course_url, reverse_usage_url
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.models.course_details import CourseDetails
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import UserFactory
from xblock_django.models import XBlockStudioConfigurationFlag
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.tabs import InvalidTabsException
from milestones.tests.utils import MilestonesTestCaseMixin
from .utils import CourseTestCase, AjaxEnabledTestClient
def get_url(course_id, handler_name='settings_handler'):
return reverse_course_url(handler_name, course_id)
class CourseSettingsEncoderTest(CourseTestCase):
"""
Tests for CourseSettingsEncoder.
"""
def test_encoder(self):
details = CourseDetails.fetch(self.course.id)
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEqual(jsondetails['course_image_name'], self.course.course_image)
self.assertIsNone(jsondetails['end_date'], "end date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_start'], "enrollment_start date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_end'], "enrollment_end date somehow initialized ")
self.assertIsNone(jsondetails['syllabus'], "syllabus somehow initialized")
self.assertIsNone(jsondetails['intro_video'], "intro_video somehow initialized")
self.assertIsNone(jsondetails['effort'], "effort somehow initialized")
self.assertIsNone(jsondetails['language'], "language somehow initialized")
def test_pre_1900_date(self):
"""
Tests that the encoder can handle a pre-1900 date, since strftime
doesn't work for these dates.
"""
details = CourseDetails.fetch(self.course.id)
pre_1900 = datetime.datetime(1564, 4, 23, 1, 1, 1, tzinfo=UTC())
details.enrollment_start = pre_1900
dumped_jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
loaded_jsondetails = json.loads(dumped_jsondetails)
self.assertEqual(loaded_jsondetails['enrollment_start'], pre_1900.isoformat())
def test_ooc_encoder(self):
"""
Test the encoder out of its original constrained purpose to see if it functions for general use
"""
details = {
'number': 1,
'string': 'string',
'datetime': datetime.datetime.now(UTC())
}
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEquals(1, jsondetails['number'])
self.assertEqual(jsondetails['string'], 'string')
@ddt.ddt
class CourseDetailsViewTest(CourseTestCase, MilestonesTestCaseMixin):
"""
Tests for modifying content on the first course settings page (course dates, overview, etc.).
"""
def setUp(self):
super(CourseDetailsViewTest, self).setUp()
def alter_field(self, url, details, field, val):
"""
Change the one field to the given value and then invoke the update post to see if it worked.
"""
setattr(details, field, val)
# Need to partially serialize payload b/c the mock doesn't handle it correctly
payload = copy.copy(details.__dict__)
payload['start_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.start_date)
payload['end_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.end_date)
payload['enrollment_start'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_start)
payload['enrollment_end'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_end)
resp = self.client.ajax_post(url, payload)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, field + str(val))
@staticmethod
def convert_datetime_to_iso(datetime_obj):
"""
Use the xblock serializer to convert the datetime
"""
return Date().to_json(datetime_obj)
def test_update_and_fetch(self):
SelfPacedConfiguration(enabled=True).save()
details = CourseDetails.fetch(self.course.id)
# resp s/b json from here on
url = get_url(self.course.id)
resp = self.client.get_json(url)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, "virgin get")
utc = UTC()
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 1, 13, 30, tzinfo=utc))
self.alter_field(url, details, 'end_date', datetime.datetime(2013, 2, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_start', datetime.datetime(2012, 10, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_end', datetime.datetime(2012, 11, 15, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'short_description', "Short Description")
self.alter_field(url, details, 'overview', "Overview")
self.alter_field(url, details, 'intro_video', "intro_video")
self.alter_field(url, details, 'effort', "effort")
self.alter_field(url, details, 'course_image_name', "course_image_name")
self.alter_field(url, details, 'language', "en")
self.alter_field(url, details, 'self_paced', "true")
def compare_details_with_encoding(self, encoded, details, context):
"""
compare all of the fields of the before and after dicts
"""
self.compare_date_fields(details, encoded, context, 'start_date')
self.compare_date_fields(details, encoded, context, 'end_date')
self.compare_date_fields(details, encoded, context, 'enrollment_start')
self.compare_date_fields(details, encoded, context, 'enrollment_end')
self.assertEqual(
details['short_description'], encoded['short_description'], context + " short_description not =="
)
self.assertEqual(details['overview'], encoded['overview'], context + " overviews not ==")
self.assertEqual(details['intro_video'], encoded.get('intro_video', None), context + " intro_video not ==")
self.assertEqual(details['effort'], encoded['effort'], context + " efforts not ==")
self.assertEqual(details['course_image_name'], encoded['course_image_name'], context + " images not ==")
self.assertEqual(details['language'], encoded['language'], context + " languages not ==")
def compare_date_fields(self, details, encoded, context, field):
"""
        Compare the given date field between the before and after dicts, deserializing the JSON value
"""
if details[field] is not None:
date = Date()
if field in encoded and encoded[field] is not None:
dt1 = date.from_json(encoded[field])
dt2 = details[field]
self.assertEqual(dt1, dt2, msg="{} != {} at {}".format(dt1, dt2, context))
else:
self.fail(field + " missing from encoded but in details at " + context)
elif field in encoded and encoded[field] is not None:
self.fail(field + " included in encoding but missing from details at " + context)
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course_list_present(self):
settings_details_url = get_url(self.course.id)
response = self.client.get_html(settings_details_url)
self.assertContains(response, "Prerequisite Course")
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course_update_and_fetch(self):
url = get_url(self.course.id)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
# assert pre_requisite_courses is initialized
self.assertEqual([], course_detail_json['pre_requisite_courses'])
        # update prerequisite courses with new course keys
pre_requisite_course = CourseFactory.create(org='edX', course='900', run='test_run')
pre_requisite_course2 = CourseFactory.create(org='edX', course='902', run='test_run')
pre_requisite_course_keys = [unicode(pre_requisite_course.id), unicode(pre_requisite_course2.id)]
course_detail_json['pre_requisite_courses'] = pre_requisite_course_keys
self.client.ajax_post(url, course_detail_json)
# fetch updated course to assert pre_requisite_courses has new values
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
self.assertEqual(pre_requisite_course_keys, course_detail_json['pre_requisite_courses'])
# remove pre requisite course
course_detail_json['pre_requisite_courses'] = []
self.client.ajax_post(url, course_detail_json)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
self.assertEqual([], course_detail_json['pre_requisite_courses'])
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True})
def test_invalid_pre_requisite_course(self):
url = get_url(self.course.id)
resp = self.client.get_json(url)
course_detail_json = json.loads(resp.content)
        # update prerequisite courses with one valid and one invalid key
pre_requisite_course = CourseFactory.create(org='edX', course='900', run='test_run')
pre_requisite_course_keys = [unicode(pre_requisite_course.id), 'invalid_key']
course_detail_json['pre_requisite_courses'] = pre_requisite_course_keys
response = self.client.ajax_post(url, course_detail_json)
self.assertEqual(400, response.status_code)
@ddt.data(
(False, False, False),
(True, False, True),
(False, True, False),
(True, True, True),
)
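    # each ddt tuple is (ENTRANCE_EXAMS, ENABLE_MKTG_SITE, expect_entrance_exam_section)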
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_visibility_of_entrance_exam_section(self, feature_flags):
"""
        Tests that the entrance exam section is available when the ENTRANCE_EXAMS
        feature is enabled, regardless of whether other features (e.g. ENABLE_MKTG_SITE)
        are enabled or disabled.
"""
with patch.dict("django.conf.settings.FEATURES", {
'ENTRANCE_EXAMS': feature_flags[0],
'ENABLE_MKTG_SITE': feature_flags[1]
}):
course_details_url = get_url(self.course.id)
resp = self.client.get_html(course_details_url)
self.assertEqual(
feature_flags[2],
'<h3 id="heading-entrance-exam">' in resp.content
)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_marketing_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {
'ENABLE_MKTG_SITE': True,
'ENTRANCE_EXAMS': False,
'ENABLE_PREREQUISITE_COURSES': False
}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Summary Page")
self.assertNotContains(response, "Send a note to students via email")
self.assertContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Card Image")
self.assertContains(response, "Course Short Description")
self.assertNotContains(response, "Course Title")
self.assertNotContains(response, "Course Subtitle")
self.assertNotContains(response, "Course Duration")
self.assertNotContains(response, "Course Description")
self.assertNotContains(response, "Course Overview")
self.assertNotContains(response, "Course Introduction Video")
self.assertNotContains(response, "Requirements")
self.assertNotContains(response, "Course Banner Image")
self.assertNotContains(response, "Course Video Thumbnail Image")
    @unittest.skipUnless(settings.FEATURES.get('ENTRANCE_EXAMS', False), 'Test only valid when the ENTRANCE_EXAMS feature is enabled')
def test_entrance_exam_created_updated_and_deleted_successfully(self):
settings_details_url = get_url(self.course.id)
data = {
'entrance_exam_enabled': 'true',
'entrance_exam_minimum_score_pct': '60',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(settings_details_url, data=json.dumps(data), content_type='application/json',
HTTP_ACCEPT='application/json')
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .60)
# Update the entrance exam
data['entrance_exam_enabled'] = "true"
data['entrance_exam_minimum_score_pct'] = "80"
response = self.client.post(
settings_details_url,
data=json.dumps(data),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .80)
# Delete the entrance exam
data['entrance_exam_enabled'] = "false"
response = self.client.post(
settings_details_url,
data=json.dumps(data),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
course = modulestore().get_course(self.course.id)
self.assertEquals(response.status_code, 200)
self.assertFalse(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, None)
    @unittest.skipUnless(settings.FEATURES.get('ENTRANCE_EXAMS', False), 'Test only valid when the ENTRANCE_EXAMS feature is enabled')
def test_entrance_exam_store_default_min_score(self):
"""
        Test that creating an entrance exam stores the default minimum score when the
        key is missing from the JSON request or entrance_exam_minimum_score_pct is an
        empty string.
"""
settings_details_url = get_url(self.course.id)
test_data_1 = {
'entrance_exam_enabled': 'true',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(
settings_details_url,
data=json.dumps(test_data_1),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
# entrance_exam_minimum_score_pct is not present in the request so default value should be saved.
self.assertEquals(course.entrance_exam_minimum_score_pct, .5)
        # Add entrance_exam_minimum_score_pct with an empty value to the JSON request.
test_data_2 = {
'entrance_exam_enabled': 'true',
'entrance_exam_minimum_score_pct': '',
'syllabus': 'none',
'short_description': 'empty',
'overview': '',
'effort': '',
'intro_video': ''
}
response = self.client.post(
settings_details_url,
data=json.dumps(test_data_2),
content_type='application/json',
HTTP_ACCEPT='application/json'
)
self.assertEquals(response.status_code, 200)
course = modulestore().get_course(self.course.id)
self.assertTrue(course.entrance_exam_enabled)
self.assertEquals(course.entrance_exam_minimum_score_pct, .5)
def test_editable_short_description_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'EDITABLE_SHORT_DESCRIPTION': False}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Short Description")
def test_regular_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False,
'ENABLE_EXTENDED_COURSE_DETAILS': True}):
response = self.client.get_html(settings_details_url)
self.assertContains(response, "Course Summary Page")
self.assertContains(response, "Send a note to students via email")
self.assertNotContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertNotContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Card Image")
self.assertContains(response, "Course Title")
self.assertContains(response, "Course Subtitle")
self.assertContains(response, "Course Duration")
self.assertContains(response, "Course Description")
self.assertContains(response, "Course Short Description")
self.assertContains(response, "Course Overview")
self.assertContains(response, "Course Introduction Video")
self.assertContains(response, "Requirements")
self.assertContains(response, "Course Banner Image")
self.assertContains(response, "Course Video Thumbnail Image")
@ddt.ddt
class CourseGradingTest(CourseTestCase):
"""
Tests for the course settings grading page.
"""
def test_initial_grader(self):
test_grader = CourseGradingModel(self.course)
self.assertIsNotNone(test_grader.graders)
self.assertIsNotNone(test_grader.grade_cutoffs)
def test_fetch_grader(self):
test_grader = CourseGradingModel.fetch(self.course.id)
self.assertIsNotNone(test_grader.graders, "No graders")
self.assertIsNotNone(test_grader.grade_cutoffs, "No cutoffs")
for i, grader in enumerate(test_grader.graders):
subgrader = CourseGradingModel.fetch_grader(self.course.id, i)
self.assertDictEqual(grader, subgrader, str(i) + "th graders not equal")
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_from_json(self, store):
self.course = CourseFactory.create(default_store=store)
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Noop update")
test_grader.graders[0]['weight'] = test_grader.graders[0].get('weight') * 2
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Weight[0] * 2")
# test for bug LMS-11485
with modulestore().bulk_operations(self.course.id):
new_grader = test_grader.graders[0].copy()
new_grader['type'] += '_foo'
new_grader['short_label'] += '_foo'
new_grader['id'] = len(test_grader.graders)
test_grader.graders.append(new_grader)
# don't use altered cached def, get a fresh one
CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__)
test_grader.grade_cutoffs['D'] = 0.3
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "cutoff add D")
test_grader.grace_period = {'hours': 4, 'minutes': 5, 'seconds': 0}
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "4 hour grace period")
def test_update_grader_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user
)
self.assertDictEqual(test_grader.graders[1], altered_grader, "Noop update")
test_grader.graders[1]['min_count'] = test_grader.graders[1].get('min_count') + 2
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
self.assertDictEqual(test_grader.graders[1], altered_grader, "min_count[1] + 2")
test_grader.graders[1]['drop_count'] = test_grader.graders[1].get('drop_count') + 1
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
        self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 1")
def test_update_cutoffs_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
# Unlike other tests, need to actually perform a db fetch for this test since update_cutoffs_from_json
# simply returns the cutoffs you send into it, rather than returning the db contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "Noop update")
test_grader.grade_cutoffs['D'] = 0.3
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff add D")
test_grader.grade_cutoffs['Pass'] = 0.75
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff change 'Pass'")
def test_delete_grace_period(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user
)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertEqual(test_grader.grace_period, altered_grader.grace_period, "Noop update")
test_grader.grace_period = {'hours': 15, 'minutes': 5, 'seconds': 30}
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grace_period, altered_grader.grace_period, "Adding in a grace period")
test_grader.grace_period = {'hours': 1, 'minutes': 10, 'seconds': 0}
# Now delete the grace period
CourseGradingModel.delete_grace_period(self.course.id, self.user)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
# Once deleted, the grace period should simply be None
self.assertEqual(None, altered_grader.grace_period, "Delete grace period")
def test_update_section_grader_type(self):
# Get the descriptor and the section_grader_type and assert they are the default values
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
# Change the default grader type to Homework, which should also mark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'Homework', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Homework', section_grader_type['graderType'])
self.assertEqual('Homework', descriptor.format)
self.assertEqual(True, descriptor.graded)
# Change the grader type back to notgraded, which should also unmark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'notgraded', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
def test_get_set_grader_types_ajax(self):
"""
Test configuring the graders via ajax calls
"""
grader_type_url_base = get_url(self.course.id, 'grading_handler')
# test get whole
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertIn('graders', whole_model)
self.assertIn('grade_cutoffs', whole_model)
self.assertIn('grace_period', whole_model)
# test post/update whole
whole_model['grace_period'] = {'hours': 1, 'minutes': 30, 'seconds': 0}
response = self.client.ajax_post(grader_type_url_base, whole_model)
self.assertEqual(200, response.status_code)
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertEqual(whole_model['grace_period'], {'hours': 1, 'minutes': 30, 'seconds': 0})
# test get one grader
self.assertGreater(len(whole_model['graders']), 1) # ensure test will make sense
response = self.client.get_json(grader_type_url_base + '/1')
grader_sample = json.loads(response.content)
self.assertEqual(grader_sample, whole_model['graders'][1])
# test add grader
new_grader = {
"type": "Extra Credit",
"min_count": 1,
"drop_count": 2,
"short_label": None,
"weight": 15,
}
response = self.client.ajax_post(
'{}/{}'.format(grader_type_url_base, len(whole_model['graders'])),
new_grader
)
self.assertEqual(200, response.status_code)
grader_sample = json.loads(response.content)
new_grader['id'] = len(whole_model['graders'])
self.assertEqual(new_grader, grader_sample)
# test delete grader
response = self.client.delete(grader_type_url_base + '/1', HTTP_ACCEPT="application/json")
self.assertEqual(204, response.status_code)
response = self.client.get_json(grader_type_url_base)
updated_model = json.loads(response.content)
        new_grader['id'] -= 1  # deleting an earlier grader shifts the remaining ids down by one
self.assertIn(new_grader, updated_model['graders'])
self.assertNotIn(whole_model['graders'][1], updated_model['graders'])
def setup_test_set_get_section_grader_ajax(self):
"""
Populate the course, grab a section, get the url for the assignment type access
"""
self.populate_course()
sections = modulestore().get_items(self.course.id, qualifiers={'category': "sequential"})
# see if test makes sense
self.assertGreater(len(sections), 0, "No sections found")
section = sections[0] # just take the first one
return reverse_usage_url('xblock_handler', section.location)
def test_set_get_section_grader_ajax(self):
"""
        Test setting and getting the section grader type via the xblock handler url
"""
grade_type_url = self.setup_test_set_get_section_grader_ajax()
response = self.client.ajax_post(grade_type_url, {'graderType': u'Homework'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'Homework')
# and unset
response = self.client.ajax_post(grade_type_url, {'graderType': u'notgraded'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'notgraded')
@ddt.ddt
class CourseMetadataEditingTest(CourseTestCase):
"""
Tests for CourseMetadata.
"""
def setUp(self):
CourseTestCase.setUp(self)
self.fullcourse = CourseFactory.create()
self.course_setting_url = get_url(self.course.id, 'advanced_settings_handler')
self.fullcourse_setting_url = get_url(self.fullcourse.id, 'advanced_settings_handler')
self.notes_tab = {"type": "notes", "name": "My Notes"}
def test_fetch_initial_fields(self):
test_model = CourseMetadata.fetch(self.course)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
        self.assertIn('showanswer', test_model, 'Missing showanswer metadata field')
        self.assertIn('xqa_key', test_model, 'Missing xqa_key metadata field')
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_fetch_giturl_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is on, show the setting as a non-deprecated Advanced Setting.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_fetch_giturl_not_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is off, don't show the setting at all on the Advanced Settings page.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_validate_update_filtered_off(self):
"""
If feature flag is off, then giturl must be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_validate_update_filtered_on(self):
"""
If feature flag is on, then giturl must not be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_update_from_json_filtered_on(self):
"""
If feature flag is on, then giturl must be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_update_from_json_filtered_off(self):
"""
        If feature flag is off, then giturl must not be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_edxnotes_present(self):
"""
If feature flag ENABLE_EDXNOTES is on, show the setting as a non-deprecated Advanced Setting.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_edxnotes_not_present(self):
"""
If feature flag ENABLE_EDXNOTES is off, don't show the setting at all on the Advanced Settings page.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_validate_update_filtered_edxnotes_off(self):
"""
If feature flag is off, then edxnotes must be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertNotIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_validate_update_filtered_edxnotes_on(self):
"""
If feature flag is on, then edxnotes must not be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
def test_update_from_json_filtered_edxnotes_on(self):
"""
If feature flag is on, then edxnotes must be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertIn('edxnotes', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': False})
def test_update_from_json_filtered_edxnotes_off(self):
"""
If feature flag is off, then edxnotes must not be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"edxnotes": {"value": "true"},
},
user=self.user
)
self.assertNotIn('edxnotes', test_model)
def test_allow_unsupported_xblocks(self):
"""
allow_unsupported_xblocks is only shown in Advanced Settings if
XBlockStudioConfigurationFlag is enabled.
"""
self.assertNotIn('allow_unsupported_xblocks', CourseMetadata.fetch(self.fullcourse))
XBlockStudioConfigurationFlag(enabled=True).save()
self.assertIn('allow_unsupported_xblocks', CourseMetadata.fetch(self.fullcourse))
def test_validate_from_json_correct_inputs(self):
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
"advanced_modules": {"value": ['notes']},
},
user=self.user
)
self.assertTrue(is_valid)
self.assertEqual(len(errors), 0)
self.update_check(test_model)
        # Tab gets tested in test_advanced_components_munge_tabs
self.assertIn('advanced_modules', test_model, 'Missing advanced_modules')
self.assertEqual(test_model['advanced_modules']['value'], ['notes'], 'advanced_module is not updated')
def test_validate_from_json_wrong_inputs(self):
# input incorrectly formatted data
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users", },
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
},
user=self.user
)
# Check valid results from validate_and_update_from_json
self.assertFalse(is_valid)
self.assertEqual(len(errors), 3)
self.assertFalse(test_model)
error_keys = set([error_obj['model']['display_name'] for error_obj in errors])
test_keys = set(['Advanced Module List', 'Course Advertised Start Date', 'Days Early for Beta Users'])
self.assertEqual(error_keys, test_keys)
# try fresh fetch to ensure no update happened
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
        self.assertNotEqual(test_model['advertised_start']['value'], 1,
                            'advertised_start should not be updated to a wrong value')
        self.assertNotEqual(test_model['days_early_for_beta']['value'], "supposed to be an integer",
                            'days_early_for_beta should not be updated to a wrong value')
def test_correct_http_status(self):
json_data = json.dumps({
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {
"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users",
},
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
})
response = self.client.ajax_post(self.course_setting_url, json_data)
self.assertEqual(400, response.status_code)
def test_update_from_json(self):
test_model = CourseMetadata.update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
},
user=self.user
)
self.update_check(test_model)
# try fresh fetch to ensure persistence
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.update_check(test_model)
# now change some of the existing metadata
test_model = CourseMetadata.update_from_json(
fresh,
{
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"},
},
user=self.user
)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def update_check(self, test_model):
"""
checks that updates were made
"""
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
self.assertIn('advertised_start', test_model, 'Missing new advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start A', "advertised_start not expected value")
self.assertIn('days_early_for_beta', test_model, 'Missing days_early_for_beta metadata field')
self.assertEqual(test_model['days_early_for_beta']['value'], 2, "days_early_for_beta not expected value")
def test_http_fetch_initial_fields(self):
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
response = self.client.get_json(self.fullcourse_setting_url)
test_model = json.loads(response.content)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
        self.assertIn('showanswer', test_model, 'Missing showanswer metadata field')
        self.assertIn('xqa_key', test_model, 'Missing xqa_key metadata field')
def test_http_update_from_json(self):
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
})
test_model = json.loads(response.content)
self.update_check(test_model)
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.update_check(test_model)
# now change some of the existing metadata
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"}
})
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def test_advanced_components_munge_tabs(self):
"""
Test that adding and removing specific advanced components adds and removes tabs.
"""
# First ensure that none of the tabs are visible
self.assertNotIn(self.notes_tab, self.course.tabs)
# Now enable student notes and verify that the "My Notes" tab has been added
self.client.ajax_post(self.course_setting_url, {
'advanced_modules': {"value": ["notes"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(self.notes_tab, course.tabs)
# Disable student notes and verify that the "My Notes" tab is gone
self.client.ajax_post(self.course_setting_url, {
'advanced_modules': {"value": [""]}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(self.notes_tab, course.tabs)
def test_advanced_components_munge_tabs_validation_failure(self):
with patch('contentstore.views.course._refresh_course_tabs', side_effect=InvalidTabsException):
resp = self.client.ajax_post(self.course_setting_url, {
'advanced_modules': {"value": ["notes"]}
})
self.assertEqual(resp.status_code, 400)
error_msg = [
{
'message': 'An error occurred while trying to save your tabs',
'model': {'display_name': 'Tabs Exception'}
}
]
self.assertEqual(json.loads(resp.content), error_msg)
# verify that the course wasn't saved into the modulestore
course = modulestore().get_course(self.course.id)
self.assertNotIn("notes", course.advanced_modules)
@ddt.data(
[{'type': 'course_info'}, {'type': 'courseware'}, {'type': 'wiki', 'is_hidden': True}],
[{'type': 'course_info', 'name': 'Home'}, {'type': 'courseware', 'name': 'Course'}],
)
def test_course_tab_configurations(self, tab_list):
self.course.tabs = tab_list
modulestore().update_item(self.course, self.user.id)
self.client.ajax_post(self.course_setting_url, {
'advanced_modules': {"value": ["notes"]}
})
course = modulestore().get_course(self.course.id)
tab_list.append(self.notes_tab)
self.assertEqual(tab_list, course.tabs)
@patch.dict(settings.FEATURES, {'ENABLE_EDXNOTES': True})
@patch('xmodule.util.django.get_current_request')
def test_post_settings_with_staff_not_enrolled(self, mock_request):
"""
        Tests that we can post advanced settings when course staff is not enrolled.
"""
mock_request.return_value = Mock(META={'HTTP_HOST': 'localhost'})
user = UserFactory.create(is_staff=True)
CourseStaffRole(self.course.id).add_users(user)
client = AjaxEnabledTestClient()
        client.login(username=user.username, password='test')
        response = client.ajax_post(self.course_setting_url, {
'advanced_modules': {"value": [""]}
})
self.assertEqual(response.status_code, 200)
class CourseGraderUpdatesTest(CourseTestCase):
"""
Test getting, deleting, adding, & updating graders
"""
def setUp(self):
"""Compute the url to use in tests"""
super(CourseGraderUpdatesTest, self).setUp()
self.url = get_url(self.course.id, 'grading_handler')
self.starting_graders = CourseGradingModel(self.course).graders
def test_get(self):
"""Test getting a specific grading type record."""
resp = self.client.get_json(self.url + '/0')
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(self.starting_graders[0], obj)
def test_delete(self):
"""Test deleting a specific grading type record."""
resp = self.client.delete(self.url + '/0', HTTP_ACCEPT="application/json")
self.assertEqual(resp.status_code, 204)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertNotIn(self.starting_graders[0], current_graders)
self.assertEqual(len(self.starting_graders) - 1, len(current_graders))
def test_update(self):
"""Test updating a specific grading type record."""
grader = {
"id": 0,
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post(self.url + '/0', grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders), len(current_graders))
def test_add(self):
"""Test adding a grading type record."""
        # The same url works for changing the whole grading model (graceperiod, cutoffs, and
        # grading types) when the grading_index is None; thus None cannot be used to imply
        # adding a grading type. Instead, an index out of bounds is used to imply "create item".
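        # e.g. if there are currently N graders, posting to <url>/N or any larger
        # index (as done below with len + 1) creates a new grader rather than
        # updating an existing one.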
grader = {
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post('{}/{}'.format(self.url, len(self.starting_graders) + 1), grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj['id'], len(self.starting_graders))
del obj['id']
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders) + 1, len(current_graders))
class CourseEnrollmentEndFieldTest(CourseTestCase):
"""
Base class to test the enrollment end fields in the course settings details view in Studio
when using marketing site flag and global vs non-global staff to access the page.
"""
NOT_EDITABLE_HELPER_MESSAGE = "Contact your edX partner manager to update these settings."
NOT_EDITABLE_DATE_WRAPPER = "<div class=\"field date is-not-editable\" id=\"field-enrollment-end-date\">"
NOT_EDITABLE_TIME_WRAPPER = "<div class=\"field time is-not-editable\" id=\"field-enrollment-end-time\">"
NOT_EDITABLE_DATE_FIELD = "<input type=\"text\" class=\"end-date date end\" \
id=\"course-enrollment-end-date\" placeholder=\"MM/DD/YYYY\" autocomplete=\"off\" readonly aria-readonly=\"true\" />"
NOT_EDITABLE_TIME_FIELD = "<input type=\"text\" class=\"time end\" id=\"course-enrollment-end-time\" \
value=\"\" placeholder=\"HH:MM\" autocomplete=\"off\" readonly aria-readonly=\"true\" />"
EDITABLE_DATE_WRAPPER = "<div class=\"field date \" id=\"field-enrollment-end-date\">"
EDITABLE_TIME_WRAPPER = "<div class=\"field time \" id=\"field-enrollment-end-time\">"
EDITABLE_DATE_FIELD = "<input type=\"text\" class=\"end-date date end\" \
id=\"course-enrollment-end-date\" placeholder=\"MM/DD/YYYY\" autocomplete=\"off\" />"
EDITABLE_TIME_FIELD = "<input type=\"text\" class=\"time end\" \
id=\"course-enrollment-end-time\" value=\"\" placeholder=\"HH:MM\" autocomplete=\"off\" />"
EDITABLE_ELEMENTS = [
EDITABLE_DATE_WRAPPER,
EDITABLE_TIME_WRAPPER,
EDITABLE_DATE_FIELD,
EDITABLE_TIME_FIELD,
]
NOT_EDITABLE_ELEMENTS = [
NOT_EDITABLE_HELPER_MESSAGE,
NOT_EDITABLE_DATE_WRAPPER,
NOT_EDITABLE_TIME_WRAPPER,
NOT_EDITABLE_DATE_FIELD,
NOT_EDITABLE_TIME_FIELD,
]
def setUp(self):
""" Initialize course used to test enrollment fields. """
super(CourseEnrollmentEndFieldTest, self).setUp()
self.course = CourseFactory.create(org='edX', number='dummy', display_name='Marketing Site Course')
self.course_details_url = reverse_course_url('settings_handler', unicode(self.course.id))
def _get_course_details_response(self, global_staff):
""" Return the course details page as either global or non-global staff"""
user = UserFactory(is_staff=global_staff)
CourseInstructorRole(self.course.id).add_users(user)
self.client.login(username=user.username, password='test')
return self.client.get_html(self.course_details_url)
def _verify_editable(self, response):
""" Verify that the response has expected editable fields.
Assert that all editable field content exists and no
uneditable field content exists for enrollment end fields.
"""
self.assertEqual(response.status_code, 200)
for element in self.NOT_EDITABLE_ELEMENTS:
self.assertNotContains(response, element)
for element in self.EDITABLE_ELEMENTS:
self.assertContains(response, element)
def _verify_not_editable(self, response):
""" Verify that the response has expected non-editable fields.
Assert that all uneditable field content exists and no
editable field content exists for enrollment end fields.
"""
self.assertEqual(response.status_code, 200)
for element in self.NOT_EDITABLE_ELEMENTS:
self.assertContains(response, element)
for element in self.EDITABLE_ELEMENTS:
self.assertNotContains(response, element)
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_MKTG_SITE': False})
def test_course_details_with_disabled_setting_global_staff(self):
""" Test that user enrollment end date is editable in response.
Feature flag 'ENABLE_MKTG_SITE' is not enabled.
User is global staff.
"""
self._verify_editable(self._get_course_details_response(True))
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_MKTG_SITE': False})
def test_course_details_with_disabled_setting_non_global_staff(self):
""" Test that user enrollment end date is editable in response.
Feature flag 'ENABLE_MKTG_SITE' is not enabled.
User is non-global staff.
"""
self._verify_editable(self._get_course_details_response(False))
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_course_details_with_enabled_setting_global_staff(self):
""" Test that user enrollment end date is editable in response.
Feature flag 'ENABLE_MKTG_SITE' is enabled.
User is global staff.
"""
self._verify_editable(self._get_course_details_response(True))
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_MKTG_SITE': True})
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_course_details_with_enabled_setting_non_global_staff(self):
""" Test that user enrollment end date is not editable in response.
Feature flag 'ENABLE_MKTG_SITE' is enabled.
User is non-global staff.
"""
self._verify_not_editable(self._get_course_details_response(False))
|
0x0all/SASM
|
refs/heads/master
|
Windows/MinGW64/opt/lib/python2.7/sre_parse.py
|
49
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
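    # inline pattern flags such as "(?i)" are looked up in this table by the
    # flag-parsing branch of _parse() and OR-ed into the Pattern's flags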
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
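        # True only for groups that have been defined and closed; _escape()
        # uses this to reject backreferences to still-open groups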
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = type(()), type([])
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in seqtypes:
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
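        # e.g. for "a|bc" this returns (1, 2): BRANCH takes the min of the
        # branch lows and the max of the branch highs, clamped to sys.maxint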
if self.width:
return self.width
lo = hi = 0L
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxint
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + long(i) * av[0]
hi = hi + long(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
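        # advance to the next token; an escape such as r"\d" is delivered as
        # a single two-character token rather than "\" followed by "d"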
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
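        # tell()/seek() give the parser a way to backtrack to a saved
        # position (see the "{" repeat handling in _parse)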
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
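    # e.g. isname("foo_1") -> True; isname("1foo") -> False; isname("a-b") -> False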
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
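            # e.g. r"\x41" parses to (LITERAL, 0x41)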
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
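    # e.g. for "ab|ac" the shared (LITERAL, 'a') is hoisted out here and the
    # remaining one-literal branches collapse into the character set [bc]
    # below, so no BRANCH opcode is emitted at all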
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
item_no = None
if source.next and not source.match(")", 0):
raise error, "pattern not properly closed"
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
setappend((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
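                # remember this position: if the brace does not complete a valid
                # {m,n} spec (e.g. "a{x"), we seek() back and treat "{" literally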
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in REPEATCODES:
raise error, "multiple repeat"
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name %r" %
name)
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in backref group name "
"%r" % name)
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error, "syntax error"
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error, "unknown group name"
else:
try:
condgroup = int(condname)
except ValueError:
raise error, "bad character in group name"
else:
# flags
                if source.next not in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "missing group name"
try:
index = int(name)
if index < 0:
raise error, "negative group number"
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error, "unmatched group"
except IndexError:
raise error, "invalid group reference"
return sep.join(literals)
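# A minimal, self-contained demo of the public entry points above (a sketch,
# not part of the original module; assumes Python 2, where this file lives):
if __name__ == "__main__":
    # parse() turns a pattern string into a SubPattern of
    # (opcode, argument) tuples; dump() pretty-prints that tree.
    tree = parse(r"(?P<word>\w+)!")
    tree.dump()
    # parse_template()/expand_template() implement re.sub()-style
    # replacement strings against a compiled pattern.
    import re
    pat = re.compile(r"(?P<first>\w+) (?P<last>\w+)")
    template = parse_template(r"\g<last>, \g<first>", pat)
    print expand_template(template, pat.match("John Smith"))  # Smith, John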
|
alikins/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_group.py
|
47
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Chris Hoffman <choffman@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a Windows documentation stub. The actual code lives in the .ps1
# file of the same name.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_group
version_added: "1.7"
short_description: Add and remove local groups
description:
- Add and remove local groups.
- For non-Windows targets, please use the M(group) module instead.
options:
name:
description:
      - Name of the group.
required: true
default: null
aliases: []
description:
description:
      - Description of the group.
required: false
default: null
aliases: []
state:
description:
      - Create or remove the group.
required: false
choices:
- present
- absent
default: present
aliases: []
notes:
- For non-Windows targets, please use the M(group) module instead.
author: "Chris Hoffman (@chrishoffman)"
'''
EXAMPLES = r'''
- name: Create a new group
win_group:
name: deploy
description: Deploy Group
state: present
- name: Remove a group
win_group:
name: deploy
state: absent
'''
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/surface/_hoverlabel.py
|
2
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="surface", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
            align
                Sets the horizontal alignment of the text
                content within the hover label box. Has an
                effect only if the hover label text spans
                two or more lines.
            alignsrc
                Sets the source reference on Chart Studio
                Cloud for align.
            bgcolor
                Sets the background color of the hover labels
                for this trace.
            bgcolorsrc
                Sets the source reference on Chart Studio
                Cloud for bgcolor.
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio
                Cloud for bordercolor.
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio
                Cloud for namelength.
            """,
),
**kwargs
)
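# A short usage sketch (hypothetical, not part of the generated file): the
# validator runs indirectly whenever surface.hoverlabel is assigned.
if __name__ == "__main__":
    import plotly.graph_objects as go  # assumes plotly is installed
    fig = go.Figure(go.Surface(z=[[1, 2], [3, 4]]))
    # Invalid keys or values here would be rejected by HoverlabelValidator.
    fig.update_traces(hoverlabel=dict(bgcolor="white", namelength=-1))
    print(fig.data[0].hoverlabel)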
|
wendellpbarreto/badroc
|
refs/heads/master
|
system/dashboard.py
|
1
|
"""
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = 'rochas.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
def init_with_context(self, context):
site_name = get_admin_site_name(context)
self.children.append(modules.AppList(
_('Rocks'),
collapsible=True,
column=1,
css_classes=('collapse closed',),
models=(
'system.core.models.IgneousRock',
'system.core.models.SedimentaryRock',
'system.core.models.MetamorphicRock',
),
))
self.children.append(modules.Group(
_('Administration'),
column=1,
collapsible=True,
            children=[
modules.AppList(
_('General configurations'),
collapsible=False,
column=1,
models=(
'system.core.models.Person',
),
),
modules.AppList(
_('Igneous Rocks'),
collapsible=False,
column=1,
models=(
'system.core.models.IgneousRockColor',
'system.core.models.IgneousRockComposition',
'system.core.models.IgneousRockMineralogy',
'system.core.models.IgneousRockOrigin',
'system.core.models.IgneousRockStructure',
'system.core.models.IgneousRockTexture',
),
),
modules.AppList(
_('Metamorphic Rocks'),
collapsible=False,
column=1,
models=(
'system.core.models.MetamorphicRockStructure',
'system.core.models.MetamorphicRockMineral',
),
),
modules.AppList(
_('Sedimentary Rocks'),
collapsible=False,
column=1,
models=(
'system.core.models.SedimentaryRockOrigin',
'system.core.models.SedimentaryRockGranularity',
'system.core.models.SedimentaryRockFraction',
'system.core.models.SedimentaryRockConstitution',
'system.core.models.SedimentaryRockStructure',
'system.core.models.SedimentaryRockFormation',
),
),
]
))
self.children.append(modules.LinkList(
_('Pages'),
column=2,
children=[
{
'title': _('rochas GUI'),
'url': '/home/',
'external': False,
},
# {
# 'title': _('Grappelli Documentation'),
# 'url': 'http://packages.python.org/django-grappelli/',
# 'external': True,
# },
# {
# 'title': _('Grappelli Google-Code'),
# 'url': 'http://code.google.com/p/django-grappelli/',
# 'external': True,
# },
]
))
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
column=2,
collapsible=False,
limit=5,
))
|
CoDaS-Lab/image_analysis
|
refs/heads/master
|
demo/img_classifier.py
|
1
|
from sklearn import datasets, metrics
from image_analysis.pipeline import Pipeline
from image_analysis.pipeline import SVM
digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
pipe = Pipeline(models={'SVM': SVM()})
pipe.train(data[:n_samples // 2], digits.target[:n_samples // 2])
expected = digits.target[n_samples // 2:]
predicted = pipe.predict(data[n_samples // 2:])
print(metrics.classification_report(expected, predicted['SVM']))
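# Optional extension (a sketch mirroring the sklearn digits tutorial): the
# confusion matrix gives a per-class breakdown of the same predictions.
print(metrics.confusion_matrix(expected, predicted['SVM']))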
|
philanthropy-u/edx-platform
|
refs/heads/master
|
openedx/core/lib/xblock_builtin/__init__.py
|
34
|
"""
Helper functions shared by built-in XBlocks.
"""
from django.conf import settings
def get_css_dependencies(group):
"""
    Returns list of CSS dependencies belonging to `group` in settings.PIPELINE_CSS.
Respects `PIPELINE_ENABLED` setting.
"""
if settings.PIPELINE_ENABLED:
return [settings.PIPELINE_CSS[group]['output_filename']]
else:
return settings.PIPELINE_CSS[group]['source_filenames']
def get_js_dependencies(group):
"""
Returns list of JS dependencies belonging to `group` in settings.PIPELINE_JS.
Respects `PIPELINE_ENABLED` setting.
"""
if settings.PIPELINE_ENABLED:
return [settings.PIPELINE_JS[group]['output_filename']]
else:
return settings.PIPELINE_JS[group]['source_filenames']
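def add_fragment_resources(fragment, group):
    """
    Hypothetical illustration (not part of this module): attach every
    dependency in `group` to an XBlock fragment, assuming the Fragment API's
    add_css_url/add_javascript_url methods.
    """
    for css_url in get_css_dependencies(group):
        fragment.add_css_url(css_url)
    for js_url in get_js_dependencies(group):
        fragment.add_javascript_url(js_url)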
|
luistorresm/odoo
|
refs/heads/8.0
|
openerp/addons/base/module/wizard/__init__.py
|
365
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_module_update
import base_language_install
import base_import_language
import base_module_upgrade
import base_module_configuration
import base_export_language
import base_update_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dovydas/mezzanine
|
refs/heads/master
|
mezzanine/accounts/urls.py
|
5
|
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from mezzanine.conf import settings
ACCOUNT_URL = getattr(settings, "ACCOUNT_URL", "/accounts/")
SIGNUP_URL = getattr(settings, "SIGNUP_URL",
"/%s/signup/" % ACCOUNT_URL.strip("/"))
SIGNUP_VERIFY_URL = getattr(settings, "SIGNUP_VERIFY_URL",
"/%s/verify/" % ACCOUNT_URL.strip("/"))
LOGIN_URL = settings.LOGIN_URL
LOGOUT_URL = settings.LOGOUT_URL
PROFILE_URL = getattr(settings, "PROFILE_URL", "/users/")
PROFILE_UPDATE_URL = getattr(settings, "PROFILE_UPDATE_URL",
"/%s/update/" % ACCOUNT_URL.strip("/"))
PASSWORD_RESET_URL = getattr(settings, "PASSWORD_RESET_URL",
"/%s/password/reset/" % ACCOUNT_URL.strip("/"))
PASSWORD_RESET_VERIFY_URL = getattr(settings, "PASSWORD_RESET_VERIFY_URL",
"/%s/password/verify/" %
ACCOUNT_URL.strip("/"))
_verify_pattern = r"/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)"
_slash = "/" if settings.APPEND_SLASH else ""
urlpatterns = patterns("mezzanine.accounts.views",
url("^%s%s$" % (LOGIN_URL.strip("/"), _slash),
"login", name="login"),
url("^%s%s$" % (LOGOUT_URL.strip("/"), _slash),
"logout", name="logout"),
url("^%s%s$" % (SIGNUP_URL.strip("/"), _slash),
"signup", name="signup"),
url("^%s%s%s$" % (SIGNUP_VERIFY_URL.strip("/"), _verify_pattern, _slash),
"signup_verify", name="signup_verify"),
url("^%s%s$" % (PROFILE_UPDATE_URL.strip("/"), _slash),
"profile_update", name="profile_update"),
url("^%s%s$" % (PASSWORD_RESET_URL.strip("/"), _slash),
"password_reset", name="mezzanine_password_reset"),
url("^%s%s%s$" %
(PASSWORD_RESET_VERIFY_URL.strip("/"), _verify_pattern, _slash),
"password_reset_verify", name="password_reset_verify"),
url("^%s%s$" % (ACCOUNT_URL.strip("/"), _slash),
"account_redirect", name="account_redirect"),
)
if settings.ACCOUNTS_PROFILE_VIEWS_ENABLED:
urlpatterns += patterns("mezzanine.accounts.views",
url("^%s%s$" % (PROFILE_URL.strip("/"), _slash),
"profile_redirect", name="profile_redirect"),
url("^%s/(?P<username>.*)%s$" % (PROFILE_URL.strip("/"), _slash),
"profile", name="profile"),
)
|
mitar/django
|
refs/heads/master
|
django/db/models/query.py
|
2
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import itertools
import sys
from django.core import exceptions
from django.db import connections, router, transaction, IntegrityError
from django.db.models.fields import AutoField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import sql
from django.utils.functional import partition
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
        for k, v in self.__dict__.items():
            if k in ('_iter', '_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs)), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
return len(self._result_cache)
def __iter__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist as e:
raise IndexError(e.args)
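    # Comment-only illustration of the slicing semantics above (the model
    # name is hypothetical):
    #   Entry.objects.all()[:5]     # LIMIT 5; still a lazy QuerySet
    #   Entry.objects.all()[5:10]   # OFFSET 5 LIMIT 5; still lazy
    #   Entry.objects.all()[::2]    # a step forces immediate evaluation
    #   Entry.objects.all()[0]      # single fetch; IndexError if no rows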
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
fill_cache = False
if connections[self.db].features.supports_select_related:
fill_cache = self.query.select_related
if isinstance(fill_cache, dict):
requested = fill_cache
else:
requested = None
max_depth = self.query.max_depth
extra_select = self.query.extra_select.keys()
aggregate_select = self.query.aggregate_select.keys()
only_load = self.query.get_loaded_field_names()
if not fill_cache:
fields = self.model._meta.fields
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if only_load:
for field, model in self.model._meta.get_fields_with_model():
if model is None:
model = self.model
try:
if field.name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
index_start = len(extra_select)
aggregate_start = index_start + len(load_fields or self.model._meta.fields)
skip = None
if load_fields and not fill_cache:
# Some fields have been deferred, so we have to initialise
# via keyword arguments.
skip = set()
init_list = []
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.attname)
model_cls = deferred_class_factory(self.model, skip)
# Cache db and model outside the loop
db = self.db
model = self.model
compiler = self.query.get_compiler(using=db)
if fill_cache:
klass_info = get_klass_info(model, max_depth=max_depth,
requested=requested, only_load=only_load)
for row in compiler.results_iter():
if fill_cache:
obj, _ = get_cached_row(row, index_start, db, klass_info,
offset=len(aggregate_select))
else:
if skip:
row_data = row[index_start:aggregate_start]
obj = model_cls(**dict(zip(init_list, row_data)))
else:
# Omit aggregates in object creation.
obj = model(*row[index_start:aggregate_start])
# Store the source database of the object
obj._state.db = db
# This object came from the database; it's not being added.
obj._state.adding = False
if extra_select:
for i, k in enumerate(extra_select):
setattr(obj, k, row[i])
# Add the aggregates to the model
if aggregate_select:
for i, aggregate in enumerate(aggregate_select):
setattr(obj, aggregate, row[i+aggregate_start])
yield obj
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=True)
return query.get_aggregation(using=self.db)
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.model._meta.object_name, kwargs))
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.model._meta.object_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def bulk_create(self, objs):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. We're punting on these for now
# because they are relatively rare cases.
if self.model._meta.parents:
raise ValueError("Can't bulk create an inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.local_fields
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self.model._base_manager._insert(objs, fields=fields, using=self.db)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self.model._base_manager._insert(objs_with_pk, fields=fields, using=self.db)
if objs_without_pk:
self.model._base_manager._insert(objs_without_pk, fields=[f for f in fields if not isinstance(f, AutoField)], using=self.db)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
return objs
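    # Comment-only sketch of the contract described above (hypothetical model):
    #   Entry.objects.bulk_create([Entry(headline='a'), Entry(headline='b')])
    # A single INSERT is issued where the backend allows it; save() is never
    # called, no pre/post save signals fire, and auto-increment pks are not
    # set on the returned instances.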
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, \
'get_or_create() must be passed at least one keyword argument'
defaults = kwargs.pop('defaults', {})
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
try:
self._for_write = True
return self.get(**lookup), False
except self.model.DoesNotExist:
try:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
obj = self.model(**params)
sid = transaction.savepoint(using=self.db)
obj.save(force_insert=True, using=self.db)
transaction.savepoint_commit(sid, using=self.db)
return obj, True
except IntegrityError as e:
transaction.savepoint_rollback(sid, using=self.db)
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
# Re-raise the IntegrityError with its original traceback.
raise exc_info[1], None, exc_info[2]
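    # Comment-only usage sketch (model and fields are hypothetical):
    #   obj, created = Person.objects.get_or_create(
    #       first_name='John', last_name='Lennon',
    #       defaults={'birthday': date(1940, 10, 9)})
    # The non-'defaults' kwargs select the row; 'defaults' only feeds creation.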
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering()
obj.query.add_ordering('-%s' % latest_by)
return obj.get()
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self._clone()
qs.query.add_filter(('pk__in', id_list))
qs.query.clear_ordering(force_empty=True)
return dict([(obj._get_pk_val(), obj) for obj in qs])
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering()
collector = Collector(using=del_query.db)
collector.collect(del_query)
collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
rows = query.get_compiler(self.db).execute_sql(None)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(None)
_update.alters_data = True
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
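    # Comment-only sketch of the two result shapes (hypothetical model):
    #   Entry.objects.values('id', 'headline')       # [{'id': 1, 'headline': u'...'}, ...]
    #   Entry.objects.values_list('id', 'headline')  # [(1, u'...'), ...]
    #   Entry.objects.values_list('id', flat=True)   # [1, 2, 3, ...]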
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of datetime objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("month", "year", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self._clone(klass=DateQuerySet, setup=True,
_field_name=field_name, _kind=kind, _order=order)
def none(self):
"""
Returns an empty QuerySet.
"""
return self._clone(klass=EmptyQuerySet)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, **kwargs):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
# Default to false for nowait
nowait = kwargs.pop('nowait', False)
obj = self._clone()
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields, **kwargs):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
"""
depth = kwargs.pop('depth', 0)
if kwargs:
raise TypeError('Unexpected keyword arguments to select_related: %s'
% (kwargs.keys(),))
obj = self._clone()
if fields:
if depth:
raise TypeError('Cannot pass both "depth" and fields to select_related()')
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
if depth:
obj.query.max_depth = depth
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
        prefetch is appended to. If prefetch_related(None) is called, the
        list is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
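    # Comment-only sketch of the accumulate/clear behaviour (hypothetical
    # models):
    #   qs = Book.objects.prefetch_related('authors')
    #   qs = qs.prefetch_related('publisher')  # now prefetches both lookups
    #   qs = qs.prefetch_related(None)         # clears the lookup list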
def dup_select_related(self, other):
"""
Copies the related selection status from the QuerySet 'other' to the
current QuerySet.
"""
self.query.select_related = other.query.select_related
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
for arg in args:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
kwargs[arg.default_alias] = arg
names = getattr(self, '_fields', None)
if names is None:
names = set(self.model._meta.get_all_field_names())
for aggregate in kwargs:
if aggregate in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % aggregate)
obj = self._clone()
obj._setup_aggregate_query(kwargs.keys())
# Add the aggregates to the query
for (alias, aggregate_expr) in kwargs.items():
obj.query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=False)
return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering()
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
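    # Comment-only sketch of how defer() and only() behave (hypothetical
    # model):
    #   Entry.objects.defer('body')     # load everything except 'body'
    #   Entry.objects.only('headline')  # load only 'headline' (plus the pk)
    #   Entry.objects.defer(None)       # remove all deferrals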
def using(self, alias):
"""
        Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.model._meta.ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model)
return self._db or router.db_for_read(self.model)
###################
# PRIVATE METHODS #
###################
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db)
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
opts = self.model._meta
if self.query.group_by is None:
field_names = [f.attname for f in opts.fields]
self.query.add_fields(field_names, False)
self.query.set_group_by()
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related isn't supported in values(). (FIXME -#3358)
self.query.select_related = False
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.aggregate_names = []
if not self.query.extra and not self.query.aggregates:
# Short cut - if there are no extra or aggregates, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
                    # we inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # previously selected.
if f in self.query.extra:
self.extra_names.append(f)
elif f in self.query.aggregate_select:
self.aggregate_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.fields]
self.aggregate_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.aggregate_names is not None:
self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.aggregate_names = self.aggregate_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.aggregate_names != other.aggregate_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.aggregate_names is not None:
self.aggregate_names.extend(aggregates)
self.query.set_aggregate_mask(self.aggregate_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
class ValuesListQuerySet(ValuesQuerySet):
def iterator(self):
if self.flat and len(self._fields) == 1:
for row in self.query.get_compiler(self.db).results_iter():
yield row[0]
elif not self.query.extra_select and not self.query.aggregate_select:
for row in self.query.get_compiler(self.db).results_iter():
yield tuple(row)
else:
# When extra(select=...) or an annotation is involved, the extra
# cols are always at the start of the row, and we need to reorder
# the fields to match the order in self._fields.
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
# If a field list has been specified, use it. Otherwise, use the
# full list of fields, including extras and aggregates.
if self._fields:
fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names)
else:
fields = names
for row in self.query.get_compiler(self.db).results_iter():
data = dict(zip(names, row))
yield tuple([data[f] for f in fields])
def _clone(self, *args, **kwargs):
clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
if not hasattr(clone, "flat"):
# Only assign flat if the clone didn't already get it from kwargs
clone.flat = self.flat
return clone
class DateQuerySet(QuerySet):
def iterator(self):
return self.query.get_compiler(self.db).results_iter()
def _setup_query(self):
"""
Sets up any special features of the query attribute.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query = self.query.clone(klass=sql.DateQuery, setup=True)
self.query.select = []
self.query.add_date_select(self._field_name, self._kind, self._order)
def _clone(self, klass=None, setup=False, **kwargs):
c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
c._field_name = self._field_name
c._kind = self._kind
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
class EmptyQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None):
super(EmptyQuerySet, self).__init__(model, query, using)
self._result_cache = []
def __and__(self, other):
return self._clone()
def __or__(self, other):
return other._clone()
def count(self):
return 0
def delete(self):
pass
def _clone(self, klass=None, setup=False, **kwargs):
c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
c._result_cache = []
return c
def iterator(self):
# This slightly odd construction is because we need an empty generator
# (it raises StopIteration immediately).
yield next(iter([]))
def all(self):
"""
Always returns EmptyQuerySet.
"""
return self
def filter(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def exclude(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def complex_filter(self, filter_obj):
"""
Always returns EmptyQuerySet.
"""
return self
def select_related(self, *fields, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def annotate(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def order_by(self, *field_names):
"""
Always returns EmptyQuerySet.
"""
return self
def distinct(self, fields=None):
"""
Always returns EmptyQuerySet.
"""
return self
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Always returns EmptyQuerySet.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
return self
def reverse(self):
"""
Always returns EmptyQuerySet.
"""
return self
def defer(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def only(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def update(self, **kwargs):
"""
Don't update anything.
"""
return 0
def aggregate(self, *args, **kwargs):
"""
Return a dict mapping the aggregate names to None
"""
for arg in args:
kwargs[arg.default_alias] = arg
return dict([(key, None) for key in kwargs])
# EmptyQuerySet is always an empty result in where-clauses (and similar
# situations).
value_annotation = False
def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
only_load=None, local_only=False):
"""
    Helper function that recursively returns the information for a klass, to be
    used in get_cached_row. It exists just to compute this information only
    once for the entire queryset. Otherwise it would be computed for each row,
    which leads to poor performance on large querysets.
Arguments:
* klass - the class to retrieve (and instantiate)
* max_depth - the maximum depth to which a select_related()
relationship should be explored.
* cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
* requested - A dictionary describing the select_related() tree
that is to be retrieved. keys are field names; values are
dictionaries describing the keys on that related object that
are themselves to be select_related().
* only_load - if the query has had only() or defer() applied,
this is the list of field names that will be returned. If None,
the full field list for `klass` can be assumed.
* local_only - Only populate local fields. This is used when
following reverse select-related relations
"""
if max_depth and requested is None and cur_depth > max_depth:
# We've recursed deeply enough; stop now.
return None
if only_load:
load_fields = only_load.get(klass) or set()
        # When we create the object, we will also be populating
# all the parent classes, so traverse the parent classes looking
# for fields that must be included on load.
for parent in klass._meta.get_parent_list():
fields = only_load.get(parent)
if fields:
load_fields.update(fields)
else:
load_fields = None
if load_fields:
# Handle deferred fields.
skip = set()
init_list = []
# Build the list of fields that *haven't* been requested
for field, model in klass._meta.get_fields_with_model():
if field.name not in load_fields:
skip.add(field.name)
elif local_only and model is not None:
continue
else:
init_list.append(field.attname)
# Retrieve all the requested fields
field_count = len(init_list)
if skip:
klass = deferred_class_factory(klass, skip)
field_names = init_list
else:
field_names = ()
else:
# Load all fields on klass
        # We try not to populate the field_names variable, for performance
        # reasons. If field_names is set, it is used to instantiate the desired
        # fields by passing **dict(zip(field_names, fields)) as kwargs to the
        # Model.__init__ method. But the kwargs version of Model.__init__ is
        # slower, so we should avoid using it when it is not really necessary.
if local_only and len(klass._meta.local_fields) != len(klass._meta.fields):
field_count = len(klass._meta.local_fields)
field_names = [f.attname for f in klass._meta.local_fields]
else:
field_count = len(klass._meta.fields)
field_names = ()
restricted = requested is not None
related_fields = []
for f in klass._meta.fields:
if select_related_descend(f, restricted, requested):
if restricted:
next = requested[f.name]
else:
next = None
klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1,
requested=next, only_load=only_load)
related_fields.append((f, klass_info))
reverse_related_fields = []
if restricted:
for o in klass._meta.get_all_related_objects():
if o.field.unique and select_related_descend(o.field, restricted, requested, reverse=True):
next = requested[o.field.related_query_name()]
klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1,
requested=next, only_load=only_load, local_only=True)
reverse_related_fields.append((o.field, klass_info))
return klass, field_names, field_count, related_fields, reverse_related_fields
def get_cached_row(row, index_start, using, klass_info, offset=0):
"""
Helper function that recursively returns an object with the specified
related attributes already populated.
This method may be called recursively to populate deep select_related()
clauses.
Arguments:
* row - the row of data returned by the database cursor
* index_start - the index of the row at which data for this
object is known to start
* offset - the number of additional fields that are known to
exist in row for `klass`. This usually means the number of
annotated results on `klass`.
* using - the database alias on which the query is being executed.
* klass_info - result of the get_klass_info function
"""
if klass_info is None:
return None
klass, field_names, field_count, related_fields, reverse_related_fields = klass_info
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
else:
if field_names:
obj = klass(**dict(zip(field_names, fields)))
else:
obj = klass(*fields)
# If an object was retrieved, set the database state.
if obj:
obj._state.db = using
obj._state.adding = False
# Instantiate related fields
index_end = index_start + field_count + offset
# Iterate over each related object, populating any
# select_related() fields
for f, klass_info in related_fields:
# Recursively retrieve the data for the related object
cached_row = get_cached_row(row, index_end, using, klass_info)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the base object exists, populate the
# descriptor cache
setattr(obj, f.get_cache_name(), rel_obj)
if f.unique and rel_obj is not None:
# If the field is unique, populate the
# reverse descriptor cache on the related object
setattr(rel_obj, f.related.get_cache_name(), obj)
# Now do the same, but for reverse related objects.
# Only handle the restricted case - i.e., don't do a depth
# descent into reverse relations unless explicitly requested
for f, klass_info in reverse_related_fields:
# Recursively retrieve the data for the related object
cached_row = get_cached_row(row, index_end, using, klass_info)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the field is unique, populate the
# reverse descriptor cache
setattr(obj, f.related.get_cache_name(), rel_obj)
if rel_obj is not None:
# If the related object exists, populate
# the descriptor cache.
setattr(rel_obj, f.get_cache_name(), obj)
# Now populate all the non-local field values
# on the related object
for rel_field, rel_model in rel_obj._meta.get_fields_with_model():
if rel_model is not None:
setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
# populate the field cache for any related object
# that has already been retrieved
if rel_field.rel:
try:
cached_obj = getattr(obj, rel_field.get_cache_name())
setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
except AttributeError:
# Related object hasn't been cached yet
pass
return obj, index_end
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None):
self.raw_query = raw_query
self.model = model
self._db = using
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def __iter__(self):
        # Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all of the model's fields
        # are present in the query.
model_init_field_names = {}
# A list of tuples of (column name, column position). Used for
# annotation fields.
annotation_fields = []
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
need_resolv_columns = hasattr(compiler, 'resolve_columns')
query = iter(self.query)
# Find out which columns are model's fields, and which ones should be
# annotated to the model.
for pos, column in enumerate(self.columns):
if column in self.model_fields:
model_init_field_names[self.model_fields[column].attname] = pos
else:
annotation_fields.append((column, pos))
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_field_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
            # All of the model's fields are present in the query, so it is
            # possible to use *args-based model instantiation. For each field
            # of the model, record the query column position matching that
            # field.
model_init_field_pos = []
for field in self.model._meta.fields:
model_init_field_pos.append(model_init_field_names[field.attname])
if need_resolv_columns:
fields = [self.model_fields.get(c, None) for c in self.columns]
# Begin looping through the query values.
for values in query:
if need_resolv_columns:
values = compiler.resolve_columns(values, fields)
# Associate fields to values
if skip:
model_init_kwargs = {}
for attname, pos in model_init_field_names.iteritems():
model_init_kwargs[attname] = values[pos]
instance = model_cls(**model_init_kwargs)
else:
model_init_args = [values[pos] for pos in model_init_field_pos]
instance = model_cls(*model_init_args)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
instance._state.db = db
instance._state.adding = False
yield instance
def __repr__(self):
return "<RawQuerySet: %r>" % (self.raw_query % tuple(self.params))
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model)
def using(self, alias):
"""
        Selects which database this RawQuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
                # Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
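# A minimal usage sketch (hypothetical `Author` model with `id` and `name`
# fields; Manager.raw() is the public entry point that builds a RawQuerySet):
#
#     for author in Author.objects.raw('SELECT id, name FROM app_author'):
#         print author.name
#
# Fields missing from the query are deferred; omitting the primary key
# raises InvalidQuery (see __iter__ above).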
def insert_query(model, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented. It is not
part of the public API.
"""
query = sql.InsertQuery(model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
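# A hedged sketch of the call Model.save() ultimately makes (simplified;
# the real call site is Model.save_base(), and `Author`/`author` are
# hypothetical):
#
#     insert_query(Author, [author], Author._meta.fields, using='default')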
def prefetch_related_objects(result_cache, related_lookups):
"""
Helper function for prefetch_related functionality
Populates prefetched objects caches for a list of results
from a QuerySet
"""
from django.db.models.sql.constants import LOOKUP_SEP
if len(result_cache) == 0:
return # nothing to do
model = result_cache[0].__class__
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_lookups = set() # list of lookups like foo__bar__baz
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = [] # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = itertools.chain(related_lookups, auto_lookups)
for lookup in all_lookups:
if lookup in done_lookups:
# We've done exactly this already, skip the whole thing
continue
done_lookups.add(lookup)
        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
attrs = lookup.split(LOOKUP_SEP)
for level, attr in enumerate(attrs):
# Prepare main instances
if len(obj_list) == 0:
break
good_objects = True
for obj in obj_list:
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
else:
# We already did this list
break
if not good_objects:
break
# Descend down tree
            # We assume that the objects retrieved are homogeneous (which is
            # the premise of prefetch_related), so what applies to the first
            # object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(attr, first_obj.__class__.__name__, lookup))
if level == len(attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to a item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup)
if prefetcher is not None and not is_fetched:
# Check we didn't do this already
current_lookup = LOOKUP_SEP.join(attrs[0:level+1])
if current_lookup in done_queries:
obj_list = done_queries[current_lookup]
else:
obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and
descriptor in followed_descriptors):
for f in additional_prl:
new_prl = LOOKUP_SEP.join([current_lookup, f])
auto_lookups.append(new_prl)
done_queries[current_lookup] = obj_list
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
new_obj_list.append(new_obj)
obj_list = new_obj_list
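# A minimal usage sketch of the public API that drives this helper
# (hypothetical `Pizza` model with a `toppings` many-to-many field):
#
#     pizzas = Pizza.objects.prefetch_related('toppings')
#     for pizza in pizzas:
#         names = [t.name for t in pizza.toppings.all()]  # no extra queries
#
# Each lookup such as 'toppings' (or 'toppings__ingredients') is split on
# LOOKUP_SEP and walked level by level, as implemented above.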
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_query_set().
Returns a 4 tuple containing:
(the object with get_prefetch_query_set (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
attr_found = False
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
try:
rel_obj = getattr(instance, attr)
attr_found = True
except AttributeError:
pass
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_query_set() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_query_set'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_query_set'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, attname):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_query_set() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name =\
prefetcher.get_prefetch_query_set(instances)
# We have to handle the possibility that the default manager itself added
# prefetch_related lookups to the QuerySet we just got back. We don't want to
# trigger the prefetch_related functionality by evaluating the query.
# Rather, we need to merge in the prefetch_related lookups.
additional_prl = getattr(rel_qs, '_prefetch_related_lookups', [])
if additional_prl:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
if rel_attr_val not in rel_obj_cache:
rel_obj_cache[rel_attr_val] = []
rel_obj_cache[rel_attr_val].append(rel_obj)
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
# Need to assign to single cache on instance
setattr(obj, cache_name, vals[0] if vals else None)
else:
# Multi, attribute represents a manager with an .all() method that
# returns a QuerySet
qs = getattr(obj, attname).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now, since we
# have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_prl
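# A sketch of the contract documented above: any object exposing a
# get_prefetch_query_set() of this shape can serve as a prefetcher. All
# names below are illustrative; the real implementations live on the
# related-object descriptors and managers:
#
#     class ExamplePrefetcher(object):
#         def get_prefetch_query_set(self, instances):
#             qs = RelatedModel.objects.filter(
#                 parent_id__in=[obj.pk for obj in instances])
#             rel_obj_attr = lambda rel_obj: rel_obj.parent_id
#             instance_attr = lambda obj: obj.pk
#             single = False           # True for one-to-one style relations
#             cache_name = 'example'   # key in _prefetched_objects_cache
#             return qs, rel_obj_attr, instance_attr, single, cache_name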
|
camptocamp/odoo
|
refs/heads/master
|
addons/resource/faces/resource.py
|
433
|
#@+leo-ver=4
#@+node:@file resource.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
#@<< Imports >>
#@+node:<< Imports >>
import pcalendar
import datetime
import utils
import string
import bisect
import plocale
#@-node:<< Imports >>
#@nl
_is_source = True
_to_datetime = pcalendar.to_datetime
_ = plocale.get_gettext()
#@+others
#@+node:_isattrib
#@+doc
#@nonl
# is used to find snapshot attributes
#@-doc
#@@code
def _isattrib(obj, a):
return a[0] != "_" \
and not callable(getattr(obj, a)) \
and not a.endswith("_members") \
           and a not in ("name",)
#@-node:_isattrib
#@+node:class ResourceCalendar
class ResourceCalendar(object):
"""
    The resource calendar records the load of a resource over time.
    It is a sequence of time intervals with their loads. An example
    of such a sequence is:
    [ (datetime.min, 0),
      (2006/1/1, 1.0),
      (2006/1/10, 0.5),
      (2006/1/15, 0) ]
    That means the resource:
    is free until January 1st, 2006
    is fully booked from January 1st to January 10th
    is half booked from January 10th to January 15th
    is free from January 15th on
"""
#@ @+others
#@+node:__init__
def __init__(self, src=None):
if src:
self.bookings = list(src.bookings)
else:
self.bookings = [ (datetime.datetime.min, 0) ]
#@-node:__init__
#@+node:__str__
def __str__(self):
return str(self.bookings)
#@-node:__str__
#@+node:__repr__
def __repr__(self):
return "<ResourceCalendar %s>" % (str(self))
#@-node:__repr__
#@+node:add_load
def add_load(self, start, end, load):
start = _to_datetime(start)
end = _to_datetime(end)
bookings = self.bookings
        # the load is converted to an integer to avoid
        # rounding problems
load = int(load * 10000)
start_item = (start, 0)
start_pos = bisect.bisect_left(bookings, start_item)
        left_load = bookings[start_pos - 1][1]
if start_pos < len(bookings) and bookings[start_pos][0] == start:
prev_load = bookings[start_pos][1]
if prev_load + load == left_load:
del bookings[start_pos]
else:
bookings[start_pos] = (start, prev_load + load)
start_pos += 1
else:
bookings.insert(start_pos, (start, load + left_load))
start_pos += 1
item = (datetime.datetime.min, 0)
for i in range(start_pos, len(bookings)):
end_pos = i
item = bookings[i]
if item[0] >= end: break
bookings[i] = (item[0], item[1] + load)
else:
end_pos = len(bookings)
left_load = bookings[end_pos - 1][1]
if item[0] == end:
if item[1] == left_load:
del bookings[end_pos]
else:
bookings.insert(end_pos, (end, left_load - load))
#@-node:add_load
#@+node:end_of_booking_interval
def end_of_booking_interval(self, date):
date = _to_datetime(date)
bookings = self.bookings
date_item = (date, 999999)
date_pos = bisect.bisect_left(bookings, date_item) - 1
next_date = datetime.datetime.max
load = 0
        try:
            load = bookings[date_pos][1] / 10000.0
            next_date = bookings[date_pos + 1][0]
        except IndexError:
            # date lies in the last, open-ended booking interval
            pass
return next_date, load
#@-node:end_of_booking_interval
#@+node:find_free_time
def find_free_time(self, start, length, load, max_load):
bookings = self.bookings
if isinstance(start, datetime.datetime):
adjust_date = _to_datetime
else:
adjust_date = start.calendar.EndDate
start = _to_datetime(start)
load = int(load * 10000)
max_load = int(max_load * 10000)
lb = len(bookings)
def next_possible(index):
while index < lb:
sd, lo = bookings[index]
if lo + load <= max_load:
break
index += 1
sd = adjust_date(max(start, sd))
ed = sd + length
end = _to_datetime(ed)
index += 1
while index < lb:
date, lo = bookings[index]
if date >= end:
#I found a good start date
return None, sd
if lo + load > max_load:
return index + 1, None
index += 1
return None, sd
start_item = (start, 1000000)
i = bisect.bisect_left(bookings, start_item) - 1
next_start = None
while not next_start and i < lb:
i, next_start = next_possible(i)
assert(next_start is not None)
return next_start
#@-node:find_free_time
#@+node:get_bookings
def get_bookings(self, start, end):
start = _to_datetime(start)
end = _to_datetime(end)
bookings = self.bookings
start_item = (start, 0)
start_pos = bisect.bisect_left(bookings, start_item)
if start_pos >= len(bookings) or bookings[start_pos][0] > start:
start_pos -= 1
end_item = (end, 0)
end_pos = bisect.bisect_left(bookings, end_item)
return start_pos, end_pos, bookings
#@-node:get_bookings
#@+node:get_load
def get_load(self, date):
date = _to_datetime(date)
bookings = self.bookings
item = (date, 100000)
pos = bisect.bisect_left(bookings, item) - 1
return bookings[pos][1] / 10000.0
#@-node:get_load
#@-others
#@-node:class ResourceCalendar
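# A minimal usage sketch (date literals are illustrative; add_load() books
# an interval, get_load() reads the booked load at a point in time):
#
#     cal = ResourceCalendar()
#     cal.add_load("2006-01-01", "2006-01-10", 1.0)   # fully booked
#     cal.get_load("2006-01-05")                      # -> 1.0
#     cal.get_load("2006-01-20")                      # -> 0.0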
#@+node:class _ResourceBase
class _ResourceBase(object):
pass
#@-node:class _ResourceBase
#@+node:class _MetaResource
class _MetaResource(type):
doc_template = """
    A resource class. The resource's default attributes can
    be changed when the class is instantiated, e.g.
%(name)s(max_load=2.0)
@var max_load:
    Specifies the maximum allowed load sum of all simultaneously
allocated tasks of a resource. A ME{max_load} of 1.0 (default)
means the resource may be fully allocated. A ME{max_load} of 1.3
means the resource may be allocated with 30%% overtime.
@var title:
    Specifies an alternative, more descriptive name for the resource.
@var efficiency:
The efficiency of a resource can be used for two purposes. First
you can use it as a crude way to model a team. A team of 5 people
    should have an efficiency of 5.0. Keep in mind that you cannot
    track the members of the team individually if you use this
feature. The other use is to model performance variations between
your resources.
@var vacation:
Specifies the vacation of the resource. This attribute is
specified as a list of date literals or date literal intervals.
Be aware that the end of an interval is excluded, i.e. it is
the first working date.
"""
#@ @+others
#@+node:__init__
def __init__(self, name, bases, dict_):
super(_MetaResource, self).__init__(name, bases, dict_)
self.name = name
self.title = dict_.get("title", name)
self._calendar = { None: ResourceCalendar() }
self._tasks = { }
self.__set_vacation()
self.__add_resource(bases[0])
self.__doc__ = dict_.get("__doc__", self.doc_template) % locals()
#@-node:__init__
#@+node:__or__
def __or__(self, other):
return self().__or__(other)
#@-node:__or__
#@+node:__and__
def __and__(self, other):
return self().__and__(other)
#@-node:__and__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self.name, getattr(other, "name", None))
#@-node:__cmp__
#@+node:__repr__
def __repr__(self):
return "<Resource %s>" % self.name
#@-node:__repr__
#@+node:__str__
def __str__(self):
return repr(self)
#@-node:__str__
#@+node:__set_vacation
def __set_vacation(self):
vacation = self.vacation
if isinstance(vacation, (tuple, list)):
for v in vacation:
if isinstance(v, (tuple, list)):
self.add_vacation(v[0], v[1])
else:
self.add_vacation(v)
else:
self.add_vacation(vacation)
#@-node:__set_vacation
#@+node:__add_resource
def __add_resource(self, base):
if issubclass(base, _ResourceBase):
members = getattr(base, base.__name__ + "_members", [])
members.append(self)
setattr(base, base.__name__ + "_members", members)
#@-node:__add_resource
#@+node:get_members
def get_members(self):
return getattr(self, self.__name__ + "_members", [])
#@-node:get_members
#@+node:add_vacation
def add_vacation(self, start, end=None):
start_date = _to_datetime(start)
if not end:
end_date = start_date.replace(hour=23, minute=59)
else:
end_date = _to_datetime(end)
for cal in self._calendar.itervalues():
cal.add_load(start_date, end_date, 1)
tp = Booking()
tp.start = start_date
tp.end = end_date
tp.book_start = start_date
tp.book_end = end_date
tp.work_time = end_date - start_date
tp.load = 1.0
tp.name = tp.title = _("(vacation)")
tp._id = ""
self._tasks.setdefault("", []).append(tp)
#@-node:add_vacation
#@+node:calendar
def calendar(self, scenario):
try:
return self._calendar[scenario]
except KeyError:
cal = self._calendar[scenario] = ResourceCalendar(self._calendar[None])
return cal
#@-node:calendar
#@-others
#@-node:class _MetaResource
#@+node:make_team
def make_team(resource):
members = resource.get_members()
if not members:
return resource
result = make_team(members[0])
for r in members[1:]:
result = result & make_team(r)
return result
#@-node:make_team
#@+node:class Booking
class Booking(object):
"""
A booking unit for a task.
"""
#@ << declarations >>
#@+node:<< declarations >>
book_start = datetime.datetime.min
book_end = datetime.datetime.max
actual = False
_id = ""
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, task=None):
self.__task = task
#@-node:__init__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self._id, other._id)
#@-node:__cmp__
#@+node:path
def path(self):
first_dot = self._id.find(".")
return "root" + self._id[first_dot:]
path = property(path)
#@nonl
#@-node:path
#@+node:_idendity_
def _idendity_(self):
return self._id
#@-node:_idendity_
#@+node:__getattr__
def __getattr__(self, name):
if self.__task:
return getattr(self.__task, name)
raise AttributeError("'%s' is not a valid attribute" % (name))
#@-node:__getattr__
#@-others
#@-node:class Booking
#@+node:class ResourceList
class ResourceList(list):
#@ @+others
#@+node:__init__
def __init__(self, *args):
if args: self.extend(args)
#@-node:__init__
#@-others
#@-node:class ResourceList
#@+node:class Resource
class Resource(_ResourceBase):
#@ << declarations >>
#@+node:<< declarations >>
__metaclass__ = _MetaResource
__attrib_completions__ = {\
"max_load": 'max_load = ',
"title": 'title = "|"',
"efficiency": 'efficiency = ',
"vacation": 'vacation = [("|2002-02-01", "2002-02-05")]' }
__type_image__ = "resource16"
max_load = None # the maximum sum load for all task
vacation = ()
efficiency = 1.0
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
#@-node:__init__
#@+node:_idendity_
def _idendity_(cls):
return "resource:" + cls.__name__
_idendity_ = classmethod(_idendity_)
#@-node:_idendity_
#@+node:__repr__
def __repr__(self):
return "<Resource %s>" % self.__class__.__name__
#@-node:__repr__
#@+node:__str__
def __str__(self):
return repr(self)
#@-node:__str__
#@+node:__call__
def __call__(self):
return self
#@-node:__call__
#@+node:__hash__
def __hash__(self):
return hash(self.__class__)
#@-node:__hash__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self.name, other.name)
#@-node:__cmp__
#@+node:__or__
def __or__(self, other):
if type(other) is _MetaResource:
other = other()
result = Resource()
result._subresource = _OrResourceGroup(self, other)
return result
#@-node:__or__
#@+node:__and__
def __and__(self, other):
if type(other) is _MetaResource:
other = other()
result = Resource()
result._subresource = _AndResourceGroup(self, other)
return result
#@-node:__and__
#@+node:_permutation_count
def _permutation_count(self):
if hasattr(self, "_subresource"):
return self._subresource._permutation_count()
return 1
#@-node:_permutation_count
#@+node:_get_resources
def _get_resources(self, state):
if hasattr(self, "_subresource"):
result = self._subresource._get_resources(state)
if self.name != "Resource":
result.name = self.name
if self.title != "Resource":
result.title = self.title
return result
result = ResourceList(self)
return result
#@-node:_get_resources
#@+node:all_members
def all_members(self):
if hasattr(self, "_subresource"):
return self._subresource.all_members()
return [ self.__class__ ]
#@-node:all_members
#@+node:unbook_tasks_of_project
def unbook_tasks_of_project(cls, project_id, scenario):
try:
task_list = cls._tasks[scenario]
except KeyError:
return
add_load = cls.calendar(scenario).add_load
for task_id, bookings in task_list.items():
if task_id.startswith(project_id):
for item in bookings:
add_load(item.book_start, item.book_end, -item.load)
del task_list[task_id]
if not task_list:
del cls._tasks[scenario]
unbook_tasks_of_project = classmethod(unbook_tasks_of_project)
#@-node:unbook_tasks_of_project
#@+node:unbook_task
def unbook_task(cls, task):
identdity = task._idendity_()
scenario = task.scenario
try:
task_list = cls._tasks[scenario]
bookings = task_list[identdity]
except KeyError:
return
add_load = cls.calendar(scenario).add_load
for b in bookings:
add_load(b.book_start, b.book_end, -b.load)
del task_list[identdity]
if not task_list:
del cls._tasks[scenario]
unbook_task = classmethod(unbook_task)
#@-node:unbook_task
#@+node:correct_bookings
def correct_bookings(cls, task):
#correct the booking data with the actual task data
try:
tasks = cls._tasks[task.scenario][task._idendity_()]
except KeyError:
return
for t in tasks:
t.start = task.start.to_datetime()
t.end = task.end.to_datetime()
correct_bookings = classmethod(correct_bookings)
#@-node:correct_bookings
#@+node:book_task
def book_task(cls, task, start, end, load, work_time, actual):
if not work_time: return
start = _to_datetime(start)
end = _to_datetime(end)
identdity = task._idendity_()
task_list = cls._tasks.setdefault(task.scenario, {})
bookings = task_list.setdefault(identdity, [])
add_load = cls.calendar(task.scenario).add_load
tb = Booking(task)
tb.book_start = start
tb.book_end = end
tb._id = identdity
tb.load = load
tb.start = _to_datetime(task.start)
tb.end = _to_datetime(task.end)
tb.title = task.title
tb.name = task.name
tb.work_time = int(work_time)
tb.actual = actual
bookings.append(tb)
result = add_load(start, end, load)
return result
book_task = classmethod(book_task)
#@-node:book_task
#@+node:length_of
def length_of(cls, task):
cal = task.root.calendar
bookings = cls.get_bookings(task)
return sum(map(lambda b: task._to_delta(b.work_time).round(), bookings))
length_of = classmethod(length_of)
#@-node:length_of
#@+node:done_of
def done_of(self, task):
cal = task.root.calendar
now = cal.now
bookings = self.get_bookings(task)
if task.__dict__.has_key("effort"):
efficiency = self.efficiency * task.efficiency
else:
efficiency = 1
def book_done(booking):
if booking.book_start >= now:
return 0
factor = 1
if booking.book_end > now:
start = task._to_start(booking.book_start)
end = task._to_end(booking.book_end)
cnow = task._to_start(now)
factor = float(cnow - start) / ((end - start) or 1)
return factor * booking.work_time * efficiency
return task._to_delta(sum(map(book_done, bookings)))
#@-node:done_of
#@+node:todo_of
def todo_of(self, task):
cal = task.root.calendar
now = cal.now
bookings = self.get_bookings(task)
if task.__dict__.has_key("effort"):
efficiency = self.efficiency * task.efficiency
else:
efficiency = 1
def book_todo(booking):
if booking.book_end <= now:
return 0
factor = 1
if booking.book_start < now:
start = task._to_start(booking.book_start)
end = task._to_end(booking.book_end)
cnow = task._to_start(now)
factor = float(end - cnow) / ((end - start) or 1)
return factor * booking.work_time * efficiency
return task._to_delta(sum(map(book_todo, bookings)))
#@-node:todo_of
#@+node:get_bookings
def get_bookings(cls, task):
return cls._tasks.get(task.scenario, {}).get(task._idendity_(), ())
get_bookings = classmethod(get_bookings)
#@-node:get_bookings
#@+node:get_bookings_at
def get_bookings_at(cls, start, end, scenario):
result = []
try:
items = cls._tasks[scenario].iteritems()
except KeyError:
return ()
for task_id, bookings in items:
result += [ booking for booking in bookings
if booking.book_start < end
and booking.book_end > start ]
vacations = cls._tasks.get("", ())
result += [ booking for booking in vacations
if booking.book_start < end
and booking.book_end > start ]
return result
get_bookings_at = classmethod(get_bookings_at)
#@-node:get_bookings_at
#@+node:find_free_time
def find_free_time(cls, start, length, load, max_load, scenario):
return cls.calendar(scenario).find_free_time(start, length, load, max_load)
find_free_time = classmethod(find_free_time)
#@-node:find_free_time
#@+node:get_load
def get_load(cls, date, scenario):
return cls.calendar(scenario).get_load(date)
get_load = classmethod(get_load)
#@-node:get_load
#@+node:end_of_booking_interval
def end_of_booking_interval(cls, date, task):
return cls.calendar(task.scenario).end_of_booking_interval(date)
end_of_booking_interval = classmethod(end_of_booking_interval)
#@-node:end_of_booking_interval
#@+node:snapshot
def snapshot(self):
from task import _as_string
def isattrib(a):
if a == "max_load" and self.max_load is None: return False
if a in ("name", "title", "vacation"): return False
return _isattrib(self, a)
attribs = filter(isattrib, dir(self))
attribs = map(lambda a: "%s=%s" % (a, _as_string(getattr(self, a))),
attribs)
return self.name + "(%s)" % ", ".join(attribs)
#@-node:snapshot
#@-others
#@-node:class Resource
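# A minimal usage sketch (hypothetical resources; the metaclass turns each
# subclass into a bookable resource, and | / & build alternative or
# parallel resource groups):
#
#     class Alice(Resource):
#         max_load = 1.0
#         vacation = [("2006-01-02", "2006-01-06")]
#
#     class Bob(Resource):
#         efficiency = 0.8
#
#     either = Alice | Bob      # one of them does the task
#     both = Alice & Bob        # they work on it together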
#@+node:class _ResourceGroup
class _ResourceGroup(object):
#@ @+others
#@+node:__init__
def __init__(self, *args):
self.resources = []
for a in args:
self.__append(a)
#@-node:__init__
#@+node:all_members
def all_members(self):
group = reduce(lambda a, b: a + b.all_members(),
self.resources, [])
group = map(lambda r: (r, True), group)
group = dict(group)
group = group.keys()
return group
#@-node:all_members
#@+node:_permutation_count
def _permutation_count(self):
        raise NotImplementedError
#@-node:_permutation_count
#@+node:_refactor
def _refactor(self, arg):
pass
#@-node:_refactor
#@+node:__append
def __append(self, arg):
if isinstance(arg, self.__class__):
self.resources += arg.resources
for r in arg.resources:
self._refactor(r)
return
elif isinstance(arg, Resource):
subresources = getattr(arg, "_subresource", None)
if subresources:
self.__append(subresources)
return
else:
self.resources.append(arg)
else:
assert(isinstance(arg, _ResourceGroup))
self.resources.append(arg)
self._refactor(arg)
#@-node:__append
#@+node:__str__
def __str__(self):
        op = self.__class__.__name__[0:-13].lower()
return "(" + \
string.join([str(r) for r in self.resources],
" " + op + " ") + \
")"
#@-node:__str__
#@-others
#@-node:class _ResourceGroup
#@+node:class _OrResourceGroup
class _OrResourceGroup(_ResourceGroup):
#@ @+others
#@+node:_get_resources
def _get_resources(self, state):
for r in self.resources:
c = r._permutation_count()
if c <= state:
state -= c
else:
return r._get_resources(state)
assert(0)
#@-node:_get_resources
#@+node:_permutation_count
def _permutation_count(self):
return sum([ r._permutation_count() for r in self.resources])
#@-node:_permutation_count
#@-others
#@-node:class _OrResourceGroup
#@+node:class _AndResourceGroup
class _AndResourceGroup(_ResourceGroup):
#@ @+others
#@+node:__init__
def __init__(self, *args):
self.factors = [ 1 ]
_ResourceGroup.__init__(self, *args)
#@-node:__init__
#@+node:_refactor
def _refactor(self, arg):
count = arg._permutation_count()
self.factors = [ count * f for f in self.factors ]
self.factors.append(1)
#@-node:_refactor
#@+node:_permutation_count
#print "AndResourceGroup", count, arg, self.factors
def _permutation_count(self):
return self.factors[0]
#@-node:_permutation_count
#@+node:_get_resources
def _get_resources(self, state):
"""delivers None when there are duplicate resources"""
result = []
for i in range(1, len(self.factors)):
f = self.factors[i]
substate = state / f
state %= f
result.append(self.resources[i - 1]._get_resources(substate))
result = ResourceList(*list(utils.flatten(result)))
dupl_test = { }
for r in result:
if dupl_test.has_key(r):
return None
else:
dupl_test[r] = 1
return result
#@-node:_get_resources
#@+node:_has_duplicates
def _has_duplicates(self, state):
resources = self._get_resources(state)
tmp = { }
for r in resources:
if tmp.has_key(r):
return True
tmp[r] = 1
return False
#@-node:_has_duplicates
#@-others
#@-node:class _AndResourceGroup
#@-others
#@-node:@file resource.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
RuiNascimento/krepo
|
refs/heads/master
|
script.areswizard/requests/packages/chardet/jpcntx.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
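# The table above is indexed as jp2CharContext[previous_order][current_order];
# feed() below tallies the resulting frequency category in self._mRelSample.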
class JapaneseContextAnalysis:
def __init__(self):
self.reset()
def reset(self):
        self._mTotalRel = 0  # total sequences received
        # category counters; each integer counts sequences in its category
self._mRelSample = [0] * NUM_OF_CATEGORY
        # if the last byte in the current buffer is not the last byte of a
        # character, we need to know how many bytes to skip in the next buffer
self._mNeedToSkipCharNum = 0
self._mLastCharOrder = -1 # The order of previous char
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
def feed(self, aBuf, aLen):
if self._mDone:
return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes of the previous
        # buffer were not complete, we record how many bytes are needed to
        # complete that character and skip them here. We could record those
        # bytes and analyse the character once it is complete, but since one
        # character will not make much difference, simply skipping it
        # simplifies our logic and improves performance.
i = self._mNeedToSkipCharNum
while i < aLen:
order, charLen = self.get_order(aBuf[i:i + 2])
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if (order != -1) and (self._mLastCharOrder != -1):
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
def got_enough_data(self):
return self._mTotalRel > ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW
def get_order(self, aBuf):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self):
        # chain to the base class so reset() initialises the counters even
        # when the analyser is used outside a prober
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"
def get_charset_name(self):
return self.charset_name
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
charLen = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self.charset_name = "CP932"
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
if (first_char == 202) and (0x9F <= second_char <= 0xF1):
return second_char - 0x9F, charLen
return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
charLen = 2
elif first_char == 0x8F:
charLen = 3
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
return second_char - 0xA1, charLen
return -1, charLen
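# A minimal usage sketch (`buf` is a hypothetical buffer of Shift_JIS-encoded
# bytes; the probers in this package drive the analyser the same way):
#
#     analyser = SJISContextAnalysis()
#     analyser.feed(buf, len(buf))
#     if analyser.got_enough_data():
#         confidence = analyser.get_confidence()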
# flake8: noqa
|
vishh/kubernetes
|
refs/heads/master
|
Godeps/_workspace/src/github.com/ugorji/go/codec/test.py
|
1139
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A test calls this internally to create the golden files
# so it can process them (so we don't have to check in the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
u"someday",
u"",
u"bytestring",
1328176922000002000,
-2206187877999998000,
270,
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
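# Example invocations (directory and port values are illustrative):
#   ./test.py testdata /tmp/golden
#   ./test.py rpc-server 6789 10
#   ./test.py rpc-client-python-service 6789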
|
stargaser/astropy
|
refs/heads/placeholder
|
astropy/visualization/wcsaxes/wcsapi.py
|
3
|
# Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BaseCoordinateFrame
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import RectangularFrame, EllipticalFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = ['transform_coord_meta_from_wcs', 'WCSWorld2PixelTransform',
'WCSPixel2WorldTransform']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
if slices is not None:
slices = tuple(slices)
if wcs.pixel_n_dim > 2:
if slices is None:
raise ValueError("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
elif len(slices) != wcs.pixel_n_dim:
raise ValueError("'slices' should have as many elements as WCS "
"has pixel dimensions (should be {})"
.format(wcs.pixel_n_dim))
is_fits_wcs = isinstance(wcs, WCS)
coord_meta = {}
coord_meta['name'] = []
coord_meta['type'] = []
coord_meta['wrap'] = []
coord_meta['unit'] = []
coord_meta['visible'] = []
coord_meta['format_unit'] = []
for idx in range(wcs.world_n_dim):
axis_type = wcs.world_axis_physical_types[idx]
axis_unit = u.Unit(wcs.world_axis_units[idx])
coord_wrap = None
format_unit = axis_unit
coord_type = 'scalar'
if axis_type is not None:
axis_type_split = axis_type.split('.')
if "pos.helioprojective.lon" in axis_type:
coord_wrap = 180.
format_unit = u.arcsec
coord_type = "longitude"
elif "pos.helioprojective.lat" in axis_type:
format_unit = u.arcsec
coord_type = "latitude"
elif "pos.heliographic.stonyhurst.lon" in axis_type:
coord_wrap = 180.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.stonyhurst.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos.heliographic.carrington.lon" in axis_type:
coord_wrap = 360.
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.carrington.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos" in axis_type_split:
if "lon" in axis_type_split:
coord_type = "longitude"
elif "lat" in axis_type_split:
coord_type = "latitude"
elif "ra" in axis_type_split:
coord_type = "longitude"
format_unit = u.hourangle
elif "dec" in axis_type_split:
coord_type = "latitude"
elif "alt" in axis_type_split:
coord_type = "longitude"
elif "az" in axis_type_split:
coord_type = "latitude"
elif "long" in axis_type_split:
coord_type = "longitude"
coord_meta['type'].append(coord_type)
coord_meta['wrap'].append(coord_wrap)
coord_meta['format_unit'].append(format_unit)
coord_meta['unit'].append(axis_unit)
# For FITS-WCS, for backward-compatibility, we need to make sure that we
# provide aliases based on CTYPE for the name.
if is_fits_wcs:
name = []
if isinstance(wcs, WCS):
name.append(wcs.wcs.ctype[idx].lower())
name.append(wcs.wcs.ctype[idx][:4].replace('-', '').lower())
elif isinstance(wcs, SlicedLowLevelWCS):
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4].replace('-', '').lower())
if name[0] == name[1]:
name = name[0:1]
if axis_type:
name.insert(0, axis_type)
name = tuple(name) if len(name) > 1 else name[0]
else:
name = axis_type or ''
coord_meta['name'].append(name)
coord_meta['default_axislabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticklabel_position'] = [''] * wcs.world_n_dim
coord_meta['default_ticks_position'] = [''] * wcs.world_n_dim
# If the world axis has a name use it, else display the world axis physical type.
fallback_labels = [name[0] if isinstance(name, (list, tuple)) else name for name in coord_meta['name']]
coord_meta['default_axis_label'] = [wcs.world_axis_names[i] or fallback_label for i, fallback_label in enumerate(fallback_labels)]
transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)
for i in range(len(coord_meta['type'])):
coord_meta['visible'].append(i in world_map)
inv_all_corr = [False] * wcs.world_n_dim
m = transform_wcs.axis_correlation_matrix.copy()
if invert_xy:
inv_all_corr = np.all(m, axis=1)
m = m[:, ::-1]
if frame_class is RectangularFrame:
for i, spine_name in enumerate('bltr'):
pos = np.nonzero(m[:, i % 2])[0]
# If all the axes we have are correlated with each other and we
# have inverted the axes, then we need to reverse the index so we
# put the 'y' on the left.
if inv_all_corr[i % 2]:
pos = pos[::-1]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 2:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bltr'
elif frame_class is RectangularFrame1D:
derivs = np.abs(local_partial_pixel_derivatives(transform_wcs, *[0]*transform_wcs.pixel_n_dim,
normalize_by_world=False))[:, 0]
for i, spine_name in enumerate('bt'):
# Here we are iterating over the correlated axes in world axis order.
# We want to sort the correlated axes by their partial derivatives,
# so we put the most rapidly changing world axis on the bottom.
pos = np.nonzero(m[:, 0])[0]
order = np.argsort(derivs[pos])[::-1] # Sort largest to smallest
pos = pos[order]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta['default_axislabel_position'][index] = spine_name
coord_meta['default_ticklabel_position'][index] = spine_name
coord_meta['default_ticks_position'][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 1:
for index in world_map:
coord_meta['default_ticks_position'][index] = 'bt'
elif frame_class is EllipticalFrame:
if 'longitude' in coord_meta['type']:
lon_idx = coord_meta['type'].index('longitude')
coord_meta['default_axislabel_position'][lon_idx] = 'h'
coord_meta['default_ticklabel_position'][lon_idx] = 'h'
coord_meta['default_ticks_position'][lon_idx] = 'h'
if 'latitude' in coord_meta['type']:
lat_idx = coord_meta['type'].index('latitude')
coord_meta['default_axislabel_position'][lat_idx] = 'c'
coord_meta['default_ticklabel_position'][lat_idx] = 'c'
coord_meta['default_ticks_position'][lat_idx] = 'c'
else:
for index in range(len(coord_meta['type'])):
if index in world_map:
coord_meta['default_axislabel_position'][index] = frame_class.spine_names
coord_meta['default_ticklabel_position'][index] = frame_class.spine_names
coord_meta['default_ticks_position'][index] = frame_class.spine_names
return transform, coord_meta
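# A minimal usage sketch, assuming a plain 2-d celestial FITS WCS: the helper
# returns a Matplotlib transform plus the per-coordinate metadata that
# WCSAxes uses to configure ticks, labels, and units.
#
#     w = WCS(naxis=2)
#     w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
#     transform, meta = transform_coord_meta_from_wcs(w, RectangularFrame)
#     # meta['type'] -> ['longitude', 'latitude']
#     # meta['format_unit'][0] -> u.hourangle (RA defaults to hour angle)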
def apply_slices(wcs, slices):
"""
Take the input WCS and slices and return a sliced WCS for the transform and
a mapping of world axes in the sliced WCS to the input WCS.
"""
if isinstance(wcs, SlicedLowLevelWCS):
world_keep = list(wcs._world_keep)
else:
world_keep = list(range(wcs.world_n_dim))
# world_map is the index of the world axis in the input WCS for a given
# axis in the transform_wcs
world_map = list(range(wcs.world_n_dim))
transform_wcs = wcs
invert_xy = False
if slices is not None:
wcs_slice = list(slices)
wcs_slice[wcs_slice.index("x")] = slice(None)
if 'y' in slices:
wcs_slice[wcs_slice.index("y")] = slice(None)
invert_xy = slices.index('x') > slices.index('y')
transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1])
world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)
return transform_wcs, invert_xy, world_map
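# A sketch of the slicing convention, assuming a 3-d WCS such as a spectral
# cube (the name 'cube_wcs' is illustrative): 'x' and 'y' entries become
# slice(None) (the plotted dimensions), integers drop a dimension, and the
# list is reversed before being handed to SlicedLowLevelWCS, which expects
# array order. With slices=('y', 'x', 0) the same axes are kept but
# invert_xy is True, because 'x' appears after 'y'.
#
#     transform_wcs, invert_xy, world_map = apply_slices(cube_wcs, ('x', 'y', 0))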
def wcsapi_to_celestial_frame(wcs):
for cls, _, kwargs, *_ in wcs.world_axis_object_classes.values():
if issubclass(cls, SkyCoord):
return kwargs.get('frame', ICRS())
elif issubclass(cls, BaseCoordinateFrame):
return cls(**kwargs)
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
            raise ValueError('Only pixel_n_dim <= 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_in = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def input_dims(self):
return self.wcs.world_n_dim
def transform(self, world):
# Convert to a list of arrays
world = list(world.T)
if len(world) != self.wcs.world_n_dim:
raise ValueError(f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} ")
if len(world[0]) == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.world_to_pixel_values(*world)
if self.invert_xy:
pixel = pixel[::-1]
pixel = np.array(pixel).T
return pixel
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
            raise ValueError('Only pixel_n_dim <= 2 is supported')
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_out = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs is other.wcs and
self.invert_xy == other.invert_xy)
@property
def output_dims(self):
return self.wcs.world_n_dim
def transform(self, pixel):
# Convert to a list of arrays
pixel = list(pixel.T)
if len(pixel) != self.wcs.pixel_n_dim:
raise ValueError(f"Expected {self.wcs.pixel_n_dim} world coordinates, got {len(pixel)} ")
if self.invert_xy:
pixel = pixel[::-1]
if len(pixel[0]) == 0:
world = np.zeros((0, self.wcs.world_n_dim))
else:
world = self.wcs.pixel_to_world_values(*pixel)
if self.wcs.world_n_dim == 1:
world = [world]
# At the moment, one has to manually check that the transformation
# round-trips, otherwise it should be considered invalid.
pixel_check = self.wcs.world_to_pixel_values(*world)
if self.wcs.pixel_n_dim == 1:
pixel_check = [pixel_check]
with np.errstate(invalid='ignore'):
invalid = np.zeros(len(pixel[0]), dtype=bool)
for ipix in range(len(pixel)):
invalid |= np.abs(pixel_check[ipix] - pixel[ipix]) > 1.
for iwrl in range(len(world)):
world[iwrl][invalid] = np.nan
world = np.array(world).T
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
|
sobjornstad/esc
|
refs/heads/master
|
esc/commands.py
|
1
|
"""
commands.py - implement menus, operations, and other things that show up
in the commands window
We use menus to register and keep track of the commands the user can choose.
Actual calculator functionality is defined in functions.py (and in user
plugins).
First come EscCommand and its subclasses, which implement both menus and
operations (which wrap the actual Python functions in functions.py that
perform calculations) in a composite tree pattern. Then come
faux-constructors (actually functions) which can be imported from
functions.py and called to register functions as operations. Through these
constructors, all registered operations and submenus end up reachable from
the main menu.
"""
from collections import OrderedDict
import decimal
from functools import wraps
from inspect import signature, Parameter
import itertools
from . import consts
from .functest import TestCase
from . import modes
from .oops import (FunctionExecutionError, InsufficientItemsError, NotInMenuError,
FunctionProgrammingError, ProgrammingError)
from . import util
BINOP = 'binop'
UNOP = 'unop'
class EscCommand:
"""
Base class for some esc functionality or operation the user can activate.
When the user activates this item, :meth:`execute` is called. Execution
takes any action associated with the item, throwing an exception if
something didn't work right. It then returns the menu that the interface
should return to. A return value of None returns to the main menu.
"""
#: Class variable describing whether this class is a menu or not.
    #: External code may occasionally need to know the difference.
is_menu = False
def __init__(self, key, description):
#: The key used to activate this item on its parent menu.
self.key = key
#: How this item is described on its parent menu.
self.description = description
#: An :class:`EscCommand` (hopefully a menu) this item is contained in.
self.parent = None
#: Mapping from keys to :class:`EscCommand`\ s on the current menu,
#: if this is a menu. Add to this using :meth:`register_child()`, not directly.
self.children = OrderedDict()
@property
def help_title(self):
"""
The title this command should show in the help system. This is the
access key and description if a description is defined; otherwise it
is just the access key.
"""
if self.description:
return f"{self.key} ({self.description})"
else:
return self.key
@property
def signature_info(self):
"""An iterable of strings to display under the "Signature" section in help."""
raise NotImplementedError
def execute(self, access_key, ss, registry):
"""
Execute this EscCommand. For operations or builtins, this involves
the class doing its own work; for menus, this returns the child
defined by *access_key*.
:return: An instance of :class:`EscCommand` representing the menu
the UI should now return to,
or ``None`` to indicate the main menu.
"""
raise NotImplementedError
def register_child(self, child):
"""
Register a new child :class:`EscCommand` of this menu (either a menu
or an operation). This operation doesn't make sense for
:class:`EscOperation` instances; the caller should avoid doing this.
"""
if child.key in self.children:
conflicting = self.children[child.key].description
raise ProgrammingError(
f"Cannot add '{child.description}' as a child of '{self.description}':"
f" the access key '{child.key}' is already in use for '{conflicting}'.")
child.parent = self
self.children[child.key] = child
def simulated_result(self, ss, registry): # pylint: disable=no-self-use, unused-argument
"""
Execute this command against the given stack state and registry, but
instead of actually changing the state, return a string describing
the result.
May return ``None`` if the :class:`EscCommand` does not change the
stack state (e.g., a menu).
"""
return None
def test(self):
"""
Execute any self-tests associated with this :class:`EscCommand`.
If a test fails, raise a
:class:`ProgrammingError <esc.oops.ProgrammingError>`.
"""
raise NotImplementedError
class EscMenu(EscCommand):
"""
A type of EscCommand that serves as a container for other menus
and operations. Executing it activates a child item.
"""
is_menu = True
def __init__(self, key, description, doc, mode_display=None):
super().__init__(key, description)
self.__doc__ = doc
#: An optional callable whose return value will be shown under the menu title.
self.mode_display = mode_display
def __repr__(self):
return (f"<EscMenu '{self.key}': [" +
", ".join(repr(i) for i in self.children.values()) +
"]>")
@property
def signature_info(self):
"Constant string that describes the menu as a menu."
return (" Type: Menu (categorizes operations)",)
@property
def is_main_menu(self):
"This is the main menu if it has no parent."
return self.parent is None
@property
def anonymous_children(self):
"Iterable of children without a description."
for i in self.children.values():
if not i.description:
yield i
@property
def named_children(self):
"Iterable of children with a description."
for i in self.children.values():
if i.description:
yield i
def child(self, access_key):
"""
Return the child defined by *access_key*.
Raises :class:`NotInMenuError <esc.oops.NotInMenuError>`
if it doesn't exist.
"""
try:
return self.children[access_key]
except KeyError:
raise NotInMenuError(access_key)
def execute(self, access_key, ss, registry):
"""
Look up the child described by *access_key* and execute it. If said
child is a menu, return it (so the user can choose an item from that
menu). Otherwise, execute the child immediately.
:param access_key: A menu access key indicating which child to execute.
:param ss: The current stack state, passed through to a child operation.
:param registry: The current registry, passed through to a child operation.
:return: The :class:`EscMenu` to display next,
or ``None`` to return to the main menu.
This will be a child menu, if one was selected,
or None if an operation runs.
:raises: :class:`FunctionExecutionError <esc.oops.FunctionExecutionError>`
or a subclass, if a child operation was selected
but does not complete successfully.
If the user chose the special quit command, return to the previous
menu, or raise ``SystemExit`` if this is the main menu.
"""
if access_key == consts.QUIT_CHARACTER:
if self.is_main_menu:
raise SystemExit(0)
else:
return self.parent
child = self.child(access_key)
if child.is_menu:
return child
else:
return child.execute(access_key, ss, registry)
def test(self):
"Execute the test method of all children."
old_testing = consts.TESTING
consts.TESTING = True
try:
for child in self.children.values():
child.test()
finally:
consts.TESTING = old_testing
class EscOperation(EscCommand):
"""
A type of EscCommand that can be run to make some changes on the stack.
"""
# pylint: disable=too-many-arguments
def __init__(self, key, func, pop, push, description, menu, retain=False,
log_as=None, simulate=True):
super().__init__(key, description)
self.parent = menu
#: The function, decorated with :func:`@Operation <Operation>`,
#: that defines the logic of this operation.
self.function = func
#: The number of items the function gets from the bottom of the stack.
#: ``-1`` indicates the entire stack is popped.
self.pop = pop
#: The number of items the function returns to the stack.
#: ``-1`` indicates a variable number of items will be returned.
self.push = push
#: If true, items pulled from the stack before execution won't be removed.
self.retain = retain
#: A description of how to log this function's execution
#: (see the docs for :func:`@Operation <Operation>`
#: for details on allowable values).
self.log_as = log_as
#: Whether this function should be run when a simulation is requested for
#: help purposes. Turn off if the function is slow or has side effects.
self.simulate_allowed = simulate
def __repr__(self):
return f"<EscOperation '{self.key}': {self.description}"
@property
def __doc__(self):
if self.function.__doc__ is None:
return "The author of this operation has not provided a description."
else:
return self.function.__doc__
@property
def signature_info(self):
"""
A description of the function's signature as a tuple of strings
(one per line to display in the help system),
based on the :attr:`pop` and :attr:`push` values.
"""
items = "item" if self.pop == 1 else "items"
results = "result" if self.push == 1 else "results"
type_ = f" Type: Operation (performs calculations)"
if self.pop == -1:
input_ = f" Input: entire stack"
else:
input_ = f" Input: {self.pop} {items} from the stack"
if self.retain:
input_ += " (will remain)"
if self.push == -1:
output = " Output: any number of items"
elif self.push == 0:
output = " Output: no output"
else:
output = f" Output: {self.push} {results} added to the stack"
return (type_, input_, output)
def _describe_operation(self, args, retvals, registry):
"""
Given the values popped from the stack (args) and the values pushed
back to the stack (retvals), return a string describing what was done.
"""
if self.log_as is None:
return self.description
elif self.log_as == UNOP:
try:
return f"{self.description} {args[0]} = {retvals[0]}"
except IndexError:
raise FunctionProgrammingError(
operation=self,
problem="requested unary operator logging (UNOP) but did not "
"request any values from the stack")
elif self.log_as == BINOP:
try:
return f"{args[0]} {self.key} {args[1]} = {retvals[0]}"
except IndexError:
raise FunctionProgrammingError(
operation=self,
problem="requested binary operator logging (BINOP) but did not "
"request two values from the stack")
elif callable(self.log_as):
return util.magic_call(
self.log_as,
{'args': args, 'retval': retvals, 'registry': registry})
else:
return self.log_as.format(*itertools.chain(args, retvals))
def _insufficient_items_on_stack(self, pops_requested=None):
"Call for a FunctionExecutionError() if the stack is too empty."
if pops_requested is None:
pops_requested = self.pop
assert pops_requested != -1 # caller needs to reset the value if it is
pops = 'item' if pops_requested == 1 else 'items'
msg = f"'{self.key}' needs at least {pops_requested} {pops} on stack."
return InsufficientItemsError(pops_requested, msg)
def _retrieve_arguments(self, ss):
"""
Get a slice of stack from /ss/ of the size requested by the function
we're calling, throwing an exception if this can't be completed.
"""
# Enter the number currently being edited, if any, stopping if it is
# invalid.
try:
ss.enter_number(running_op=self.key)
except ValueError as e:
raise FunctionExecutionError(str(e))
# Make sure there will be space to push the results.
# If requesting the whole stack, it's the function's responsibility to check.
if not ss.has_push_space(self.push - self.pop) and self.pop != -1:
num_short = self.push - self.pop - ss.free_stack_spaces
spaces = 'space' if num_short == 1 else 'spaces'
msg = f"'{self.key}': stack is too full (short {num_short} {spaces})."
raise FunctionExecutionError(msg)
if self.pop == -1:
# Whole stack requested; will push the whole stack back later.
args = ss.s[:]
if not self.retain:
ss.clear()
else:
args = ss.pop(self.pop, retain=self.retain)
if (not args) and self.pop != 0:
raise self._insufficient_items_on_stack()
return args
def _simulated_description(self, args, log, results):
"""
Return a list of strings to display in esc's interface to describe an
operation that takes /args/, produces a log message of /log/, and
outputs /results/.
"""
description = [f"This calculation would occur:",
f" {log}"]
if self.retain:
description.append("The following stack items would be read as input:")
else:
description.append("The following stack items would be consumed:")
if args:
for i in args:
description.append(f" {i}")
else:
description.append(" (none)")
description.append("The following results would be returned:")
if results:
for i in results:
description.append(f" {i}")
else:
description.append(" (none)")
return description
def _store_results(self, ss, args, return_values, registry):
"""
Return the values computed by our function to the stack
and record the operation in a history entry.
"""
if self.push > 0 or (self.push == -1 and return_values is not None):
if not hasattr(return_values, '__iter__'):
return_values = (return_values,)
try:
coerced_retvals = util.decimalize_iterable(return_values)
except (decimal.InvalidOperation, TypeError) as e:
raise FunctionProgrammingError(
operation=self,
problem="returned a value that cannot be converted "
"to a Decimal") from e
ss.push(coerced_retvals,
self._describe_operation(args, return_values, registry))
else:
ss.record_operation(self._describe_operation(args, (), registry))
def execute(self, access_key, ss, registry): # pylint: disable=useless-return
"""
Execute the esc operation wrapped by this instance on the given stack
state and registry.
:param access_key: Not used by this subclass.
:param ss: The current stack state, passed through to a child operation.
:param registry: The current registry, passed through to a child operation.
:return: A constant ``None``,
indicating that we go back to the main menu.
:raises: :class:`FunctionExecutionError <esc.oops.FunctionExecutionError>`
or a subclass,
if the operation cannot be completed successfully.
"""
with ss.transaction():
args = self._retrieve_arguments(ss)
try:
retvals = self.function(args, registry)
except ValueError:
# illegal operation; restore original args to stack and return
raise FunctionExecutionError("Domain error! Stack unchanged.")
except ZeroDivisionError:
raise FunctionExecutionError(
"Sorry, division by zero is against the law.")
except decimal.InvalidOperation:
raise FunctionExecutionError(
"That operation is not defined by the rules of arithmetic.")
except InsufficientItemsError as e:
raise self._insufficient_items_on_stack(e.number_required)
self._store_results(ss, args, retvals, registry)
return None # back to main menu
def simulated_result(self, ss, registry):
"""
Execute the operation on the provided `StackState`,
but don't actually change the state --
instead, provide a description of what would happen.
"""
if not self.simulate_allowed:
return ("The author of this operation has disabled", "simulations.")
used_args = ss.last_n_items(self.pop)
checkpoint = ss.memento()
try:
self.execute(None, ss, registry)
results = ss.last_n_items(self.push)
log_message = ss.last_operation
except InsufficientItemsError as e:
items = "item is" if e.number_required == 1 else "items are"
return (
f"An error would occur. (At least {e.number_required} stack {items}",
f"needed to run this function.)")
except FunctionExecutionError:
return ("An error would occur. (Most likely the values on ",
"the stack are not valid.)")
finally:
ss.restore(checkpoint)
return self._simulated_description(used_args, log_message, results)
def test(self):
r"""
If the function on this :class:`EscOperation` has associated
:class:`TestCase <esc.functest.TestCase>`\ s
defined in its *tests* attribute,
execute those tests.
"""
# Some internal functions that are registered, such as mode changes,
# don't have a tests attribute. We want to ignore those.
if hasattr(self.function, 'tests'):
for test_case in self.function.tests:
test_case.execute(self)
class EscBuiltin(EscCommand):
r"""
Mock class for built-in commands. Built-in :class:`EscCommand`\ s do not
    actually get run to do anything -- they are special-cased because they
need access to internals normal commands cannot access. However, it's
still useful to have classes for them as stand-ins for things like
retrieving help.
Unlike the other :class:`EscCommand`\ s, each :class:`EscBuiltin` has its
own subclass rather than its own instance, as they each need special
behaviors. The subclasses are defined in the
:mod:`builtin_stubs <esc.builtin_stubs>` module.
Subclasses should override the docstring
and the :meth:`simulated_result` method.
Subclasses should also define :attr:`key <esc.commands.EscCommand.key>`
and :attr:`description <esc.commands.EscCommand.description>`
as class variables. They'll be shadowed by instance variables once we
instantiate the class, but the values will be the same. That sounds dumb,
but it makes sense for all other classes in the hierarchy and doesn't
hurt us here. We don't want to define them in the ``__init__`` of each
subclass because then we have to instantiate every class to match on them
by key (see the reflective search in ``esc.helpme``).
"""
def __init__(self):
super().__init__(self.key, self.description)
self.is_menu = False
def execute(self, access_key, ss, registry): # pylint: disable=useless-return
"Executing a builtin does nothing."
def simulated_result(self, ss, registry):
"Reimplemented by each subclass."
raise NotImplementedError
def test(self):
"Testing a builtin with esc's function test feature does nothing."
@property
def signature_info(self):
"Constant string that describes the built-in as a built-in."
type_ = f" Type: Built-in (performs special esc actions)"
return (type_,)
### Main menu ###
# As I write this, if the user ever sees this docstring, something's probably
# gone wrong, since there's no way to choose the main menu from a menu and thus
# get its help, but in the interest of future-proofing, we'll say something
# interesting.
MAIN_DOC = """
The main menu. All other esc functions and menus are eventually accessible
from this menu.
"""
# We have to define the main menu somewhere so we can get at the operations and
# menus on it. Files of functions will ultimately need to import this menu to
# register anything useful.
main_menu = EscMenu('', "Main Menu", doc=MAIN_DOC) # pylint: disable=invalid-name
### Constructor/registration functions ###
def Menu(key, description, parent, doc, mode_display=None): # pylint: disable=invalid-name
"""
Register a new submenu of an existing menu.
:param key: The keyboard key used to select this menu from its parent.
:param description: A short description of this menu to show beside the key.
:param parent:
An :class:`EscMenu` to add this menu to.
This may be ``esc.commands.main_menu`` or another menu.
:param doc:
A string describing the menu, to be used in the help system.
This should be something like the docstring
of an operation function.
:param mode_display:
An optional callable returning a string whose value will be shown
beneath the name of the menu when the menu is open.
Ordinarily, this is used to show the current value of any modes
that apply to the functions on the menu.
:return: A new :class:`EscMenu`.
"""
menu = EscMenu(key, description, doc, mode_display)
parent.register_child(menu)
return menu
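# A usage sketch: registering a submenu of the main menu. The name
# 'constants_menu' is purely illustrative.
#
#     constants_menu = Menu('c', 'constants', parent=main_menu,
#                           doc="Frequently used constants.")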
def Constant(value, key, description, menu): # pylint: disable=invalid-name
"""
Register a new constant. Constants are just exceedingly boring operations
that pop no values and push a constant value,
so this is merely syntactic sugar.
:param value: The value of the constant,
as a Decimal or a value that can be converted to one.
:param key: The key to press to select the constant from the menu.
:param description: A brief description to show next to the *key*.
:param menu: A :class:`Menu <EscMenu>` to place this function on.
"""
@Operation(key=key, menu=menu, push=1, description=description,
log_as=f"insert constant {description}")
def func():
return value
# You can't define a dynamic docstring from within the function.
func.__doc__ = f"Add the constant {description} = {value} to the stack."
def Operation(key, menu, push, description=None, retain=False, log_as=None, simulate=True): # pylint: disable=invalid-name
"""
Decorator to register a function on a menu
and make it available for use as an esc operation.
:param key:
The key on the keyboard to press
to trigger this operation on the menu.
:param menu:
The :class:`Menu <EscMenu>` to place this operation on.
The simplest choice is ``main_menu``,
        which you can import from :mod:`esc.commands`.
:param push:
The number of items the decorated function
will return to the stack on success.
``0`` means nothing is ever returned;
``-1`` means a variable number of things are returned.
:param description:
A very brief description of the operation this function implements,
to be displayed next to it on the menu.
If this is ``None`` (the default), the operation is "anonymous"
and will be displayed at the top of the menu with just its *key*.
:param retain:
If ``True``, the items bound to this function's arguments
will remain on the stack on successful execution.
The default is ``False``
(meaning the function's return value replaces
whatever was there before --
the usual behavior of an RPN calculator).
:param log_as:
A specification describing what appears
in the :guilabel:`History` window after executing this function.
It may be ``None`` (the default), ``UNOP`` or ``BINOP``,
a .format() string, or a callable.
* If it is ``None``, the *description* is used.
* If it is the module constant ``esc.commands.UNOP``
or ``esc.commands.BINOP``,
the log string is a default suitable
for many unary or binary operations:
for ``UNOP`` it is
:samp:`{description} {argument} = {return}`
and for ``BINOP`` it is
:samp:`{argument} {key} {argument} = {return}`.
.. note::
If the function being decorated does not take one or two arguments,
respectively,
using ``UNOP`` or ``BINOP`` will raise a
:class:`ProgrammingError <esc.oops.ProgrammingError>`.
* If it is a format string, positional placeholders are replaced
with the parameters to the function in sequence,
then the return values.
Thus, a function with two arguments ``bos`` and ``sos``
returning a tuple of two values replaces
``{0}`` with ``bos``, ``{1}`` with ``sos``,
and ``{2}`` and ``{3}`` with the two return values.
* If it is a callable, the parameters will be examined and bound
by name to the following (none of these parameters are required,
but arguments other than these will raise a
:class:`ProgrammingError <esc.oops.ProgrammingError>`).
:args: a list of the arguments the function requested
:retval: a list of values the function returned
:registry: the current :class:`Registry <esc.registers.Registry>` instance
The function should return an appropriate string.
:param simulate:
If ``True`` (the default), function execution will be simulated
when the user looks at the help page for the function,
so they can see what would happen to the stack
if they actually chose the function.
You should disable this option
if your function is extremely slow or has side effects
(e.g., changing the system clipboard, editing registers).
In addition to placing the function on the menu,
the function is wrapped with the following magic.
1. Function parameters are bound according to the following rules:
* Most parameters are bound
to a slice of values at the bottom of the stack, by position.
If the function has one parameter,
it receives :ref:`bos <Terminology and notation>`;
if the function has two parameters,
the first receives sos and the second bos;
and so on.
The parameters can have any names (see exceptions below).
Using ``bos`` and ``sos`` is conventional for general operations,
but if the operation is implementing some kind of formula,
it may be more useful to name the parameters
for their meaning in the formula.
* By default, passed parameters are of type `Decimal`_.
If the parameter name ends with ``_str``,
it instead receives a string representation
(this is exactly what shows up in the calculator window,
so it's helpful when doing something display-oriented
like copying to the clipboard).
If the parameter name ends with ``_stackitem``,
it receives the complete :class:`StackItem <esc.stack.StackItem>`,
containing both of those representations and a few other things besides.
* A varargs parameter, like ``*args``,
receives the entire contents of the stack as a tuple.
This is invalid with any other parameters except ``registry``.
The ``_str`` and ``_stackitem`` suffixes still work.
Again, it can have any name; ``*stack`` is conventional for esc operations.
* The special parameter name ``registry``
receives a :class:`Registry <esc.registers.Registry>` instance
containing the current state of all registers.
Using this parameter is generally discouraged;
see :ref:`Registry` for details.
* The special parameter name ``testing``
receives a boolean describing whether the current execution is a test
(see :func:`esc.functest.TestCase`).
This can be useful if your function has side effects
that you don't want to execute during testing,
but you'd still like to test the rest of the function.
2. The function has a callable attached to it as an attribute,
called ``ensure``, which can be used to test the function at startup
to ensure the function never stops calculating the correct answers
due to updates or other issues:
.. code-block:: python
def add(sos, bos):
return sos + bos
add.ensure(before=[1, 2, 3], after=[1, 5])
See :class:`TestCase <esc.functest.TestCase>`
for further information on this testing feature.
"""
def function_decorator(func):
sig = signature(func)
parms = sig.parameters.values()
bind_all = [i for i in parms if i.kind == Parameter.VAR_POSITIONAL]
stack_parms = [i for i in parms if i.name not in ('registry', 'testing')]
pop = len(stack_parms) if not bind_all else -1
def _bind_stack_parm(stack_item, parm):
if parm.name.endswith('_stackitem'):
return stack_item
if parm.name.endswith('_str'):
return stack_item.string
else:
return stack_item.decimal
@wraps(func)
def wrapper(stack, registry):
positional_binding = []
keyword_binding = {}
if bind_all:
positional_binding.extend(_bind_stack_parm(stack_item, bind_all[0])
for stack_item in stack)
else:
stack_slice = stack[-(len(stack_parms)):]
keyword_binding.update({parm.name: _bind_stack_parm(stack_item, parm)
for stack_item, parm
in zip(stack_slice, stack_parms)})
if 'registry' in (i.name for i in parms):
keyword_binding['registry'] = registry
if 'testing' in (i.name for i in parms):
keyword_binding['testing'] = consts.TESTING
return func(*positional_binding, **keyword_binding)
# Add test definition functionality.
def ensure(before, after=None, raises=None, close=False):
tc = TestCase(before, after, raises, close)
wrapper.tests.append(tc)
wrapper.ensure = ensure
wrapper.tests = []
# Create a new EscOperation instance and place it on the menu.
op = EscOperation(key=key, func=wrapper, pop=pop, push=push,
description=description, menu=menu, log_as=log_as,
retain=retain, simulate=simulate)
menu.register_child(op)
# Return the wrapped function to functions.py to complete
# the decorator protocol.
return wrapper
return function_decorator
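# A usage sketch mirroring the docstring above: a binary addition operation
# registered on the main menu, with a start-up self-test attached.
#
#     @Operation('+', menu=main_menu, push=1, description='add', log_as=BINOP)
#     def add(sos, bos):
#         "Add the bottom two stack items."
#         return sos + bos
#     add.ensure(before=[1, 2, 3], after=[1, 5])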
def Mode(name, default_value, allowable_values=None): # pylint: disable=invalid-name
"""
Register a new mode.
:param name:
The name of the mode. This is used to refer to it in code.
If a mode with this name already exists,
a :class:`ProgrammingError <esc.oops.ProgrammingError>` will be raised.
:param default_value: The value the mode starts at.
:param allowable_values:
An optional sequence of possible values for the mode.
If defined, if code ever tries to set a different value,
a :class:`ProgrammingError <esc.oops.ProgrammingError>` will be raised.
"""
return modes.register(name, default_value, allowable_values)
def ModeChange(key, description, menu, mode_name, to_value): # pylint: disable=invalid-name
"""
Create a new mode change operation the user can select from a menu.
Syntactic sugar for registering an operation.
:param key: The key to press to select the constant from the menu.
:param description: A brief description to show next to the *key*.
:param menu: A :class:`Menu <EscMenu>` to place this operation on.
:param mode_name: The name of the mode, registered with :func:`Mode`,
to set.
:param to_value: The value the mode will be set to
when this operation is selected.
"""
op = EscOperation(key=key, func=lambda _, __: modes.set(mode_name, to_value),
pop=0, push=0, description=description, menu=menu)
menu.register_child(op)
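# A usage sketch (the trig menu name is hypothetical): a degrees/radians mode
# and the menu entries that flip it.
#
#     Mode('angle_unit', 'radians', allowable_values=('radians', 'degrees'))
#     ModeChange('d', 'degrees', menu=trig_menu,
#                mode_name='angle_unit', to_value='degrees')
#     ModeChange('r', 'radians', menu=trig_menu,
#                mode_name='angle_unit', to_value='radians')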
|
jrief/easy-thumbnails
|
refs/heads/postprocessor
|
easy_thumbnails/migrations/0012_build_storage_hashes.py
|
15
|
# encoding: utf-8
import datetime
import hashlib
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.files.storage import default_storage
import pickle
class Migration(DataMigration):
"""
Migrate storage hashes.
"""
def get_storage_hash(self, storage):
"""
Return a hex string hash for a storage object (or string containing a
pickle of a storage object).
"""
try:
# Make sure that pickle is getting a string, since it can choke
# with unicode.
            storage_obj = pickle.loads(str(storage.pickle))
        except Exception:
# We need to return some storage, and if there's an exception then
# it is most likely the default_storage (since that fails with a
# recursion error due to LazyObject "awesomeness").
storage_obj = default_storage
storage_cls = storage_obj.__class__
name = '%s.%s' % (storage_cls.__module__, storage_cls.__name__)
return hashlib.md5(name).hexdigest()
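    # For example, under Python 2 the default filesystem storage hashes as
    # hashlib.md5('django.core.files.storage.FileSystemStorage').hexdigest().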
def forwards(self, orm):
"Write your forwards methods here."
for storage in orm.Storage.objects.all():
storage_hash = self.get_storage_hash(storage)
orm.Source.objects.filter(storage=storage).update(
storage_hash=storage_hash)
orm.Thumbnail.objects.filter(storage=storage).update(
storage_hash=storage_hash)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'easy_thumbnails.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 8, 0, 32, 41, 855399)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'storage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.Storage']"}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'easy_thumbnails.storage': {
'Meta': {'object_name': 'Storage'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickle': ('django.db.models.fields.TextField', [], {})
},
'easy_thumbnails.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 8, 0, 32, 41, 855399)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': "orm['easy_thumbnails.Source']"}),
'storage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.Storage']"}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['easy_thumbnails']
|
foobarbazblarg/stayclean
|
refs/heads/master
|
stayclean-2020-december/venv/lib/python3.8/site-packages/pip/__main__.py
|
17
|
from __future__ import absolute_import
import os
import sys
# Remove '' and current working directory from the first entry
# of sys.path, if present to avoid using current directory
# in pip commands check, freeze, install, list and show,
# when invoked as python -m pip <command>
if sys.path[0] in ('', os.getcwd()):
sys.path.pop(0)
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage: python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
    # first dirname call strips off '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
from pip._internal.cli.main import main as _main # isort:skip # noqa
if __name__ == '__main__':
sys.exit(_main())
|
has2k1/plotnine
|
refs/heads/master
|
plotnine/themes/theme_classic.py
|
1
|
from .elements import element_line, element_rect, element_blank
from .theme import theme
from .theme_bw import theme_bw
class theme_classic(theme_bw):
"""
A classic-looking theme, with x & y axis lines and
no gridlines.
Parameters
----------
base_size : int, optional
        Base font size. All text sizes are scaled versions of
        the base font size. Default is 11.
base_family : str, optional
Base font family.
"""
def __init__(self, base_size=11, base_family=None):
theme_bw.__init__(self, base_size, base_family)
self.add_theme(
theme(panel_border=element_blank(),
axis_line=element_line(color='black'),
panel_grid_major=element_line(),
panel_grid_major_x=element_blank(),
panel_grid_major_y=element_blank(),
panel_grid_minor=element_line(),
panel_grid_minor_x=element_blank(),
panel_grid_minor_y=element_blank(),
strip_background=element_rect(
colour='black', fill='None', size=1),
legend_key=element_blank()),
inplace=True)
|
cynrd/blog
|
refs/heads/master
|
sokoban_astar/move.py
|
1
|
from state import State
class Move:
def __init__(self, walls):
self.walls = walls
def get_state(self, state):
if not self.can_move_in_direction(state):
return None
target_player_pos = (self.next_row(state), self.next_col(state))
if self.walls[target_player_pos[0]][target_player_pos[1]] == 1:
return None
if target_player_pos not in state.boxes:
return State(target_player_pos, state.boxes)
        # target_player_pos holds a box here; try to push it one cell further.
        target_box_pos = (self.next_next_row(state), self.next_next_col(state))
        if self.can_move_box_in_direction(state) and \
           self.walls[target_box_pos[0]][target_box_pos[1]] == 0 and \
           target_box_pos not in state.boxes:
            boxes_copy = list(state.boxes)
            boxes_copy.remove(target_player_pos)
            boxes_copy.append(target_box_pos)
            return State(target_player_pos, boxes_copy)
        return None
def can_move_in_direction(self, state):
raise NotImplementedError
def next_next_row(self, state):
raise NotImplementedError
def next_col(self, state):
raise NotImplementedError
def next_row(self, state):
raise NotImplementedError
def next_next_col(self, state):
raise NotImplementedError
def can_move_box_in_direction(self, state):
raise NotImplementedError
class Up(Move):
@staticmethod
def can_move_in_direction(state):
return state.player[0] > 0
@staticmethod
def can_move_box_in_direction(state):
return state.player[0] > 1
@staticmethod
def next_row(state):
return state.player[0]-1
@staticmethod
def next_col(state):
return state.player[1]
@staticmethod
def next_next_row(state):
return state.player[0]-2
@staticmethod
def next_next_col(state):
return state.player[1]
class Down(Move):
def can_move_in_direction(self, state):
return state.player[0] < len(self.walls)-1
def can_move_box_in_direction(self, state):
return state.player[0] < len(self.walls)-2
@staticmethod
def next_row(state):
return state.player[0]+1
@staticmethod
def next_col(state):
return state.player[1]
@staticmethod
def next_next_row(state):
return state.player[0]+2
@staticmethod
def next_next_col(state):
return state.player[1]
class Left(Move):
@staticmethod
def can_move_in_direction(state):
return state.player[1] > 0
@staticmethod
def can_move_box_in_direction(state):
return state.player[1] > 1
@staticmethod
def next_row(state):
return state.player[0]
@staticmethod
def next_col(state):
return state.player[1]-1
@staticmethod
def next_next_row(state):
return state.player[0]
@staticmethod
def next_next_col(state):
return state.player[1]-2
class Right(Move):
def can_move_in_direction(self, state):
return state.player[1] < len(self.walls[0])-1
def can_move_box_in_direction(self, state):
return state.player[1] < len(self.walls[0])-2
@staticmethod
def next_row(state):
return state.player[0]
@staticmethod
def next_col(state):
return state.player[1]+1
@staticmethod
def next_next_row(state):
return state.player[0]
@staticmethod
def next_next_col(state):
return state.player[1]+2
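# A usage sketch on a hypothetical 3x5 grid (1 = wall): the player at (1, 1)
# pushes the box at (1, 2) one cell to the right.
#
#     walls = [[1, 1, 1, 1, 1],
#              [1, 0, 0, 0, 1],
#              [1, 1, 1, 1, 1]]
#     state = State((1, 1), [(1, 2)])
#     pushed = Right(walls).get_state(state)
#     # pushed.player == (1, 2); pushed.boxes == [(1, 3)]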
|
bireme/django-fossil
|
refs/heads/master
|
fossil/tests/test_models.py
|
1
|
import datetime
from django.db import models, connection
from django.core.management.color import no_style
from django.contrib.flatpages.models import FlatPage
from django.contrib.contenttypes import generic
from django.db.models.sql.query import setup_join_cache
from django.contrib.auth.models import User
from fossil.fields import FossilForeignKey
class Supplier(models.Model):
class Meta:
app_label = 'reversion_relations'
name = models.CharField(max_length=100)
location = models.TextField(blank=True)
salary = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
starred = models.BooleanField(default=False, blank=True)
points = models.IntegerField(default=0, blank=True)
date_foundation = models.DateField(blank=True, null=True)
def __unicode__(self):
return self.name
class Purchase(models.Model):
class Meta:
app_label = 'reversion_relations'
date = models.DateTimeField(blank=True, default=datetime.datetime.now)
supplier = FossilForeignKey(Supplier, null=True, blank=True)
user = models.ForeignKey(User, null=True, blank=True)
setup_join_cache(Supplier)
setup_join_cache(Purchase)
def create_tables():
cursor = connection.cursor()
style = no_style()
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
sql, references = connection.creation.sql_create_model(Supplier, style, seen_models)
new_sql, new_ref = connection.creation.sql_create_model(Purchase, style, seen_models)
    sql.extend(new_sql)
    references.update(new_ref)
pending_references = {}
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(Supplier, style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(Purchase, style, pending_references))
for statement in sql:
cursor.execute(statement)
|
linebp/pandas
|
refs/heads/master
|
pandas/core/common.py
|
1
|
"""
Misc tools for implementing data structures
"""
import sys
import warnings
from datetime import datetime, timedelta
from functools import partial
import inspect
import collections
import numpy as np
from pandas._libs import lib, tslib
from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import _NS_DTYPE
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isnull
from pandas.api import types
from pandas.core.dtypes import common
# compat
from pandas.errors import ( # noqa
PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError)
# back-compat of public API
# deprecate these functions
m = sys.modules['pandas.core.common']
for t in [t for t in dir(types) if not t.startswith('_')]:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"import from the public API: "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(types, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# back-compat for non-public functions
# deprecate these functions
for t in ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"These are not longer public API functions, "
"but can be imported from "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(common, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# deprecate array_equivalent
def array_equivalent(*args, **kwargs):
warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
"is no longer public API", DeprecationWarning, stacklevel=2)
from pandas.core.dtypes import missing
return missing.array_equivalent(*args, **kwargs)
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
return ("This method must be defined in the concrete class of %s" %
self.class_instance.__class__.__name__)
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
    This doesn't consider strings to be sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, (np.datetime64, datetime)):
value = tslib.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslib.Timedelta(value)
return value
_values_from_object = lib.values_from_object
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
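# Usage sketch:
#     is_bool_indexer(np.array([True, False]))  # -> True
#     is_bool_indexer([1, 2, 3])                # -> False
#     # an object-dtype array mixing booleans with NaN raises ValueError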
def _default_index(n):
from pandas.core.index import RangeIndex
return RangeIndex(0, n, name=None)
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _not_none(*args):
return (arg for arg in args if arg is not None)
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _count_not_none(*args):
return sum(x is not None for x in args)
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def iterpairs(seq):
"""
Parameters
----------
seq : sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
        if not val:  # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
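# Usage sketch:
#     groupby(['apple', 'avocado', 'banana'], key=lambda s: s[0])
#     # -> {'a': ['apple', 'avocado'], 'b': ['banana']}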
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def is_full_slice(obj, l):
""" we have a full length slice """
return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
obj.step is None)
def _get_callable_name(obj):
# typical case has name
if hasattr(obj, '__name__'):
return getattr(obj, '__name__')
# some objects don't; could recurse
if isinstance(obj, partial):
return _get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, '__call__'):
return obj.__class__.__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def _dict_compat(d):
"""
Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict
Parameters
----------
d: dict like object
Returns
-------
dict
"""
return dict((_maybe_box_datetimelike(key), value)
for key, value in iteritems(d))
def standardize_mapping(into):
"""
Helper function to standardize a supplied mapping.
.. versionadded:: 0.21.0
Parameters
----------
into : instance or subclass of collections.Mapping
Must be a class, an initialized collections.defaultdict,
or an instance of a collections.Mapping subclass.
Returns
-------
mapping : a collections.Mapping subclass or other constructor
a callable object that can accept an iterator to create
the desired Mapping.
See Also
--------
DataFrame.to_dict
Series.to_dict
"""
if not inspect.isclass(into):
if isinstance(into, collections.defaultdict):
return partial(
collections.defaultdict, into.default_factory)
into = type(into)
if not issubclass(into, collections.Mapping):
raise TypeError('unsupported type: {}'.format(into))
elif into == collections.defaultdict:
raise TypeError(
'to_dict() only accepts initialized defaultdicts')
return into
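# Usage sketch:
#     standardize_mapping(dict)                           # -> dict
#     standardize_mapping(collections.OrderedDict())      # -> OrderedDict
#     standardize_mapping(collections.defaultdict(list))  # -> partial(defaultdict, list)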
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
    check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if types.is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
|
obreitwi/nest-simulator
|
refs/heads/master
|
doc/nest_by_example/scripts/one_neuron_with_sine_wave.py
|
4
|
# -*- coding: utf-8 -*-
#
# one_neuron_with_sine_wave.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
neuron = nest.Create('iaf_neuron')
sine = nest.Create('ac_generator', 1,
{'amplitude': 100.0,
'frequency': 2.0})
noise = nest.Create('poisson_generator', 2,
[{'rate': 70000.0},
{'rate': 20000.0}])
voltmeter = nest.Create('voltmeter', 1,
{'withgid': True})
nest.Connect(sine, neuron)
nest.Connect(voltmeter, neuron)
nest.Connect(noise[:1], neuron, syn_spec={'weight': 1.0, 'delay': 1.0})
nest.Connect(noise[1:], neuron, syn_spec={'weight': -1.0, 'delay': 1.0})
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.savefig('../figures/voltage_trace.eps')
|
gorserg/openprocurement.tender.competitivedialogue
|
refs/heads/master
|
openprocurement/tender/competitivedialogue/views/stage2/award_document.py
|
2
|
# -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openua.views.award_document import (
TenderUaAwardDocumentResource
)
from openprocurement.tender.openeu.views.award_document import (
TenderAwardDocumentResource as TenderEUAwardDocumentResource
)
from openprocurement.tender.competitivedialogue.constants import (
STAGE_2_UA_TYPE, STAGE_2_EU_TYPE
)
@optendersresource(name='{}:Tender Award Documents'.format(STAGE_2_EU_TYPE),
collection_path='/tenders/{tender_id}/awards/{award_id}/documents',
path='/tenders/{tender_id}/awards/{award_id}/documents/{document_id}',
procurementMethodType=STAGE_2_EU_TYPE,
description="Tender award documents")
class CompetitiveDialogueStage2EUAwardDocumentResource(TenderEUAwardDocumentResource):
pass
@optendersresource(name='{}:Tender Award Documents'.format(STAGE_2_UA_TYPE),
collection_path='/tenders/{tender_id}/awards/{award_id}/documents',
path='/tenders/{tender_id}/awards/{award_id}/documents/{document_id}',
procurementMethodType=STAGE_2_UA_TYPE,
description="Competitive Dialogue Stage 2 UA award documents")
class CompetitiveDialogueStage2UAAwardDocumentResource(TenderUaAwardDocumentResource):
pass
|
dmazzella/micropython
|
refs/heads/master
|
tests/basics/string_endswith_upy.py
|
32
|
# MicroPython doesn't support tuple argument
try:
"foobar".endswith(("bar", "sth"))
except TypeError:
print("TypeError")
|
soedinglab/hh-suite
|
refs/heads/master
|
scripts/hhmakemodel.py
|
1
|
#!/usr/bin/env python
from hh_reader import read_result
from copy import deepcopy
from pdbx.reader.PdbxReader import PdbxReader
from pdbx.writer.PdbxWriter import PdbxWriter
import re, os, sys, tempfile, glob
from operator import itemgetter # hzhu
from itertools import groupby # hzhu
EMPTY = '*'
GAP = '-'
DEBUG_MODE = False
class Gap:
""" A gap is a continuous stretch of indels.
It is defined by a opening position and a size/length
"""
def __init__(self, open_pos, size):
self.open_pos = open_pos # gap opening position
self.size = size # num of indels in the gap
def __repr__(self):
return 'Gap opening pos = %d, size = %d' % (self.open_pos, self.size)
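# Example (illustrative): repr(Gap(3, 2)) == 'Gap opening pos = 3, size = 2'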
class Grid:
"""
Implementation of 2D grid of cells
Includes boundary handling
"""
def __init__(self, grid_height, grid_width):
"""
Initializes grid to be empty, take height and width of grid as parameters
        Indexed by rows (top to bottom), then by columns (left to right)
"""
self._grid_height = grid_height
self._grid_width = grid_width
self._cells = [ [ EMPTY for dummy_col in range(self._grid_width) ]
for dummy_row in range(self._grid_height)]
def __str__(self):
""" Return multi-line string represenation for grid """
ans = ''
for row in range(self._grid_height):
ans += ''.join(self._cells[row])
ans += '\n'
return ans
def clear(self):
""" Clears grid to be empty """
self._cells = [[EMPTY for dummy_col in range(self._grid_width)]
for dummy_row in range(self._grid_height)]
def get_grid_height(self):
""" Return the height of the grid """
return self._grid_height
def get_grid_width(self):
""" Return the width of the grid """
return self._grid_width
def get_cell(self, row, col):
return self._cells[row][col]
def get_seq_start(self, row):
""" Returns the start position of the sequence """
index = 0
for pos in self._cells[row]:
if pos != EMPTY:
return index
index += 1
return None
def get_seq_end(self, row):
""" Returns the end position of the sequence """
index = 0
for pos in reversed(self._cells[row]):
if pos != EMPTY:
return self.get_grid_width() - index
index += 1
return None
def get_gaps(self, row):
""" Return the position of gaps in a row """
gaps = list()
index = 0
for pos in self._cells[row]:
if pos == GAP:
gaps.append(index)
index += 1
return gaps
def get_gaps_ref_gapless(self, row):
""" Return the pos of gaps in a row.
The opening positions of the gaps are wrt. the gapless seq
"""
# get all the indels
indels = self.get_gaps(row)
gaps = []
# combine continuous indels into a gap
for k,i in groupby( enumerate(indels), lambda x: x[0]-x[1] ):
g = list(map(itemgetter(1), i))
gaps.append( Gap(g[0], len(g)) )
# offset the gap opening positions
for i in range(1, len(gaps)):
# offset by total gap number before
gaps[i].open_pos -= sum([gaps[j].size for j in range(i)])
return gaps # a list of Gap instances
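    # Worked example (illustrative): for a row reading 'AB--C-D' the raw gap
    # indices are [2, 3, 5]; the groupby on (index - value) splits them into
    # the consecutive runs [2, 3] and [5], i.e. Gap(2, 2) and Gap(5, 1);
    # subtracting the sizes of all earlier gaps then re-expresses the second
    # opening position wrt the gapless sequence: Gap(2, 2) and Gap(3, 1).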
def get_seq_indeces(self, row):
seq = list()
for pos, res in enumerate(self._cells[row]):
if res != EMPTY and res != GAP:
seq.append(pos)
return seq
## def get_gap_list(self): # hzhu commented this out. wrote a new version
## """ Returns a list of list of all gap positions in the sequence grid. """
## gap_pos = set()
## for row in range(self.get_grid_height()):
## for gap in self.get_gaps(row):
## gap_pos.add(gap)
## gap_pos = list(sorted(gap_pos))
## boundaries = [ (x + 1) for x, y in zip(gap_pos, gap_pos[1:]) if y - x != 1 ]
## gap_list = list()
## prev = 0
## for boundary in boundaries:
## sub_list = [ pos for pos in gap_pos[prev:] if pos < boundary ]
## gap_list.append(sub_list)
## prev += len(sub_list)
## gap_list.append([ x for x in gap_pos[prev:]])
## return gap_list
def get_gap_list(self):
""" Returns a list of Gap instances for all rows in the grid
"""
gap_dict = dict() # each position should occur as gap at most once
# keys are gap openning positions
# values are Gap instances
gap_list = []
for row in range(self.get_grid_height()):
gap_pos = []
gaps = self.get_gaps_ref_gapless(row)
for g in gaps:
if g.open_pos in gap_dict: # if there is already gaps at this open pos
if g.size > gap_dict[g.open_pos].size: # if new gap is bigger
gap_dict[g.open_pos] = g # keep the larger gap as they overlap
else:
gap_dict[g.open_pos] = g
gap_list = sorted(list(gap_dict.values()), key=lambda x: x.open_pos) # sort according to start position
return gap_list # a list of Gap instances
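    # Illustrative: if row 0 contributes Gap(4, 2) and row 1 contributes
    # Gap(4, 3), only the larger Gap(4, 3) is kept, since both open at the
    # same gapless position 4.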
def set_gap(self, row, col):
""" Set cell with index (row, col) to be a gap """
self._cells[row][col] = GAP
def set_empty(self, row, col):
""" Set cell with index (row, col) to be a gap """
self._cells[row][col] = EMPTY
def set_cell(self, row, col, res):
""" Set cell with index (row, col) to be full """
self._cells[row][col] = res
def is_empty(self, row, col):
""" Checks whether cell with index (row, col) is empty """
return self._cells[row][col] == EMPTY
def is_gap(self, row, col):
""" Checks whetehr cell with indxex (row, col) is a gap """
return self._cells[row][col] == GAP
def insert_gaps(self, cols):
""" Inserts a gaps into a column of the template grid """
for col in cols:
for row in range(self._grid_height):
if col >= self.get_seq_start(row) and col < self.get_seq_end(row):
self._cells[row].insert(col, GAP)
else:
self._cells[row].insert(col, EMPTY)
self._grid_width += 1
def insert_gaps_row(self, cols, row):
""" Intert gaps into cols only for certain row"""
for col in cols:
if col >= self.get_seq_start(row) and col < self.get_seq_end(row):
self._cells[row].insert(col, GAP)
else:
self._cells[row].insert(col, EMPTY)
        # NOTE: grid_width must not be incremented per call; update it only
        # after every row has been processed.
#self._grid_width += 1
def clean_trail_empty(self):
""" Remove all trailing EMPTY and pad grid to same width"""
        # first find out the max length (excluding trailing EMPTY)
max_width = 0
for row in range(self._grid_height):
for i in range(len(self._cells[row])-1, -1, -1):
if self._cells[row][i] != EMPTY:
break
if i+1 > max_width:
max_width = i+1
# delete excessive EMPTY
for row in range(self._grid_height):
del self._cells[row][max_width:]
        # then pad all rows to the same length (extend element-wise so each
        # cell stays a single character)
        for row in range(self._grid_height):
            if len(self._cells[row]) < max_width:
                self._cells[row].extend([EMPTY] * (max_width - len(self._cells[row])))
self._grid_width = max_width
return
def remove_gaps(self, keep_width=True): # hzhu add keep_width option
""" Removes all gaps from the grid. """
for row in range(self.get_grid_height()):
not_gap = list()
for col in range(self.get_grid_width()):
if not self.is_gap(row, col):
not_gap.append(col)
self._cells[row] = [ self._cells[row][col] for col in not_gap ]
if keep_width: # hzhu only pad to original width if desired
for del_pos in range(self._grid_width - len(not_gap)):
self._cells[row].append(EMPTY)
if not keep_width: # hzhu if width is not kept, make sure width is consistent
self.clean_trail_empty()
return
class QueryGrid(Grid):
def __init__(self, grid_height, grid_width):
Grid.__init__(self, grid_height, grid_width)
def get_query_start(self, row):
""" Returns the query start position """
return self.get_seq_start(row) + 1
def get_query_end(self, row):
""" Returns the query end postion """
return self.get_seq_end(row) - len(self.get_gaps(row))
def get_col_residue(self, col):
""" Tries to find a the query residue in a given column. Used by derive_global_seq() to
identify the global query sequence """
for row in range(self.get_grid_height()):
if not self.is_empty(row, col):
return self._cells[row][col]
return GAP
class TemplateGrid(Grid):
def __init__(self, grid_height, grid_width):
Grid.__init__(self, grid_height, grid_width)
self._start = list()
self._end = list()
self._pdb_code = list()
self._chain = list()
self._organism = list()
self._resolution = list()
def display(self):
""" Return multi-line string represenation for grid """
ans = ''
for row in range(self._grid_height):
ans += '>P1;{p}\nstructure:{p}:{s}:{c}:{e}:{c}::{o}:{r}:\n{a}*\n'.format(
p = self._pdb_code[row],
s = add_white_space_end(self.get_template_start(row), 4),
e = add_white_space_end(self.get_template_end(row), 4),
c = self._chain[row],
o = self._organism[row],
r = self._resolution[row],
a = ''.join(self._cells[row]).replace(EMPTY, GAP).replace('#', GAP))
return ans
def debug(self, row):
""" Return multi-line string represenation for grid, for debugging purposes """
ans = '{p}\nInternal: {s}, {e} Query: {qs}, {qe} Gaps ({g1}): {g2}\n{seq}\n'.format(
p = self._pdb_code[row],
s = self.get_seq_start(row),
e = self.get_seq_end(row),
qs = self.get_template_start(row),
qe = self.get_template_end(row),
g1 = len(self.get_gaps(row)),
g2 = ', '.join([str(gap) for gap in self.get_gaps(row)]),
seq = ''.join(self._cells[row]))
return ans
def set_metadata(self, row, start, end, pdb_code, chain, organism, resolution):
""" Used by create_template_grid() to setup metadata of pir template """
self._start.append(start)
self._end.append(end)
self._pdb_code.append(pdb_code)
self._chain.append(chain)
self._organism.append(organism)
self._resolution.append(resolution)
def set_map(self, row, start, end):
self._start[row] = start
self._end[row] = end
def get_template_start(self, row):
""" Returns the template start position """
return self._start[row]
def get_template_end(self, row):
""" Return sthe template end position """
return self._end[row]
def del_row(self, row):
""" Removes a complete template entry from the grid """
del self._cells[row]
del self._start[row]
del self._end[row]
del self._pdb_code[row]
del self._chain[row]
del self._organism[row]
del self._resolution[row]
self._grid_height -= 1
# Helper functions
def add_white_space_end(string, length):
""" Adds whitespaces to a string until it has the wished length"""
edited_string = str(string)
if len(edited_string) >= length:
return string
else:
while len(edited_string) != length:
edited_string += ' '
return edited_string
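# Examples (illustrative):
#   add_white_space_end('12', 4)     # -> '12  '
#   add_white_space_end('abcde', 4)  # -> 'abcde' (already long enough)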
def convert_aa_code(three_letter, convert):
"""
Assumes a string that contains a three letter aminoacid code and
returns the corresponding one letter code.
"""
aa_code = {
'CYS': 'C',
'ASP': 'D',
'SER': 'S',
'GLN': 'Q',
'LYS': 'K',
'ILE': 'I',
'PRO': 'P',
'THR': 'T',
'PHE': 'F',
'ASN': 'N',
'GLY': 'G',
'HIS': 'H',
'LEU': 'L',
'ARG': 'R',
'TRP': 'W',
'ALA': 'A',
'VAL': 'V',
'GLU': 'E',
'TYR': 'Y',
'MET': 'M',
}
non_canonical = {
'MSE': 1,
'HYP': 2,
'MLY': 3,
'SEP': 4,
'TPO': 5,
'CSO': 6,
'PTR': 7,
'KCX': 8,
'CME': 9,
'CSD': 10,
'CAS': 11,
'MLE': 12,
'DAL': 13,
'CGU': 14,
'DLE': 15,
'FME': 16,
'DVA': 17,
'OCS': 18,
'DPR': 19,
'MVA': 20,
'TYS': 21,
'M3L': 22,
'SMC': 23,
'ALY': 24,
'CSX': 25,
'DCY': 26,
'NLE': 27,
'DGL': 28,
'DSN': 29,
'CSS': 30,
'DLY': 31,
'MLZ': 32,
'DPN': 33,
'DAR': 34,
'PHI': 35,
'IAS': 36,
'DAS': 37,
'HIC': 38,
'MP8': 39,
'DTH': 40,
'DIL': 41,
'MEN': 42,
'DTY': 43,
'CXM': 44,
'DGN': 45,
'DTR': 46,
'SAC': 47,
'DSG': 48,
'MME': 49,
'MAA': 50,
'YOF': 51,
'FP9': 52,
'FVA': 53,
'MLU': 54,
'OMY': 55,
'FGA': 56,
'MEA': 57,
'CMH': 58,
'DHI': 59,
'SEC': 60,
'OMZ': 61,
'SCY': 62,
'MHO': 63,
'MED': 64,
'CAF': 65,
'NIY': 66,
'OAS': 67,
'SCH': 68,
'MK8': 69,
'SME': 70,
'LYZ': 71
}
if three_letter in aa_code.keys():
return aa_code[three_letter]
elif convert and (three_letter in non_canonical.keys()):
return non_canonical[three_letter]
else:
return '-'
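# Examples (illustrative):
#   convert_aa_code('MET', convert=True)   # -> 'M'
#   convert_aa_code('MSE', convert=True)   # -> 1 (id in the non_canonical table)
#   convert_aa_code('MSE', convert=False)  # -> '-'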
def get_query_name(hhr_file):
with open(hhr_file) as fh:
for line in fh:
if line.startswith('Query'):
# match the PDB Code
                m = re.search(r'(\d[A-Z0-9]{3})_(\S)', line)
if m:
pdb_code = m.group(1)
chain = m.group(2)
else:
pdb_code = 'UKNP'
chain = 'A'
# raise ValueError('Input HHR-File Does not seem to be a PDB-Structure')
break
return pdb_code, chain
def get_cif_files(folder):
""" Gets all cif files located in folder. """
    return glob.glob(os.path.join(folder, '*.cif'))
def open_cif(cif_file):
""" Assumes a mmCif file and returns a data block used for subsequent procedures """
# The "usual" procedure to open a mmCIF with pdbX/mmCIF
with open(cif_file) as cif_fh:
data = []
reader = PdbxReader(cif_fh)
reader.read(data)
block = data[0]
return block
def get_pdb_entry_id(block):
""" Extracts the PDB entry information of a cif file and returns it as a string """
entry = block.getObj('entry')
entry_id = entry.getValue('id')
return entry_id
def template_id_to_pdb(template_id):
"""
Extracts PDB ID and chain name from the provided template id
"""
# match PDBID without chain (8fab, 1a01)
    m = re.match(r'^(\d[A-Za-z0-9]{3})$', template_id)
if m:
return m.group(1).upper(), 'A'
# PDB CODE with chain Identifier
m = re.match(r'^(\d[A-Za-z0-9]{3})_(\S)$', template_id)
if m:
return m.group(1).upper(), m.group(2).upper()
# Match DALI ID
m = re.match(r'^(\d[A-Za-z0-9]{3})([A-Za-z0-9]?)_\d+$', template_id)
if m:
return m.group(1).upper(), m.group(2).upper()
# No PDB code and chain identified
return None, None
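# Examples (illustrative):
#   template_id_to_pdb('1a01')      # -> ('1A01', 'A')
#   template_id_to_pdb('1a01_B')    # -> ('1A01', 'B')
#   template_id_to_pdb('2xyzA_12')  # -> ('2XYZ', 'A')  (DALI-style id)
#   template_id_to_pdb('foo')       # -> (None, None)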
def create_template_grid(hhr_data):
""" Creates a template grid """
total_seq = len(hhr_data)
templ_max = max( [ hhr.start[0] + len(to_seq(hhr.template_ali)) for hhr in hhr_data ] ) - 1
template_grid = TemplateGrid(total_seq, templ_max)
for row, template in enumerate(hhr_data):
seq_start = template.start[0] - 1
templatealignment = to_seq(template.template_ali)
seq_end = seq_start + len(templatealignment)
# Load Meta Data
start = template.start[1]
end = template.end[1]
# Get pdb_code and chain identifier of template
pdb_code, chain = template_id_to_pdb(template.template_id)
m = re.search("(\d+.\d+)A", template.template_info) # try to extract resolution of the structure
if m:
resolution = m.group(1)
else:
resolution = ""
m = re.search("\{(.*)\}", template.template_info) # try to extract the organism
if m:
organism = m.group(1).replace(":", " ") # make sure that no colons are in the organism
else:
organism = ""
template_grid.set_metadata(row, start, end, pdb_code, chain, organism, resolution)
# Write sequence into the grid
for pos, col in enumerate(range(seq_start, seq_end)):
template_grid.set_cell(row, col, templatealignment[pos])
return template_grid
def to_seq(ali):
if isinstance(ali, list):
return ''.join(ali)
else:
return ali
def create_query_grid(hhr_data):
""" Creates a Query Grid """
total_seq = len(hhr_data)
query_max = max( [ hhr.start[0] + len(to_seq(hhr.query_ali)) for hhr in hhr_data ] ) - 1
query_grid = QueryGrid(total_seq, query_max)
for row, query in enumerate(hhr_data):
queryalignment = to_seq(query.query_ali)
query_start = query.start[0] - 1
query_end = query_start + len(queryalignment)
for pos, col in enumerate(range(query_start, query_end)):
if queryalignment[pos] not in ['Z', 'U', 'O', 'J', 'X', 'B']: # CAUTION
query_grid.set_cell(row, col, queryalignment[pos])
return query_grid
def create_gapless_grid(grid):
""" Returns a gapless grid """
gapless = deepcopy(grid)
gapless.remove_gaps(keep_width=False) # hzhu: shrink grid
return gapless
def process_query_grid(query_grid, gapless_grid):
""" Processes a query grid sucht that it contains all gaps
"""
gaplist = query_grid.get_gap_list()
off_set = 0
for g in gaplist:
gapless_grid.insert_gaps([ p + off_set for p in range(g.open_pos, g.open_pos+g.size) ])
off_set += g.size
return gapless_grid
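# Illustrative: with gaps Gap(2, 1) and Gap(5, 2) from the query grid, the
# first insert widens the gapless grid by one column, so the second gap is
# expanded at the shifted columns [6, 7]; off_set tracks that shift.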
def derive_global_seq(processed_query_grid, query_name, query_chain):
global_seq = list()
for col in range(processed_query_grid.get_grid_width()):
global_seq.append(processed_query_grid.get_col_residue(col))
# this is the query entry
header = '>P1;{q}\nsequence:{q}:1 :{c}:{l} :{c}::::\n'.format(
q = query_name,
l = len(global_seq),
c = query_chain)
return header + ''.join(global_seq) + '*'
def process_template_grid(query_grid, template_grid):
""" Insertes Gaps into the template grid
Only add gaps from **other** query_grids into template grid (NOT gapless)
"""
gaplist = query_grid.get_gap_list() # use this to keep the offset
for row in range(template_grid.get_grid_height()):
# do NOT consider gaps in current query row
gaplist_row = query_grid.get_gaps_ref_gapless(row)
gapdict_row = dict(zip([g.open_pos for g in gaplist_row],
[g.size for g in gaplist_row]))
off_set = 0
for g in gaplist:
# if there is a gap with same opening position in the current row,
            # only consider g if it is larger than the one in the current row
if g.open_pos in gapdict_row:
if g.size > gapdict_row[g.open_pos]:
template_grid.insert_gaps_row([ p + off_set for p in range(g.open_pos,
g.open_pos+g.size-gapdict_row[g.open_pos]) ], row)
else:
template_grid.insert_gaps_row([ p + off_set for p in range(g.open_pos, g.open_pos+g.size) ], row)
off_set += g.size # even if the gaps are not inserted, the offset should be adjusted
template_grid.clean_trail_empty() # clean the redundant trailing EMPTY char
return template_grid
def compare_with_cifs(template_grid, folder, output_path, convert, threshold):
"""
    Compare the PIR alignment with the ATOM-section of a mmCIF file. To make the
    ATOM-section of a mmCIF file compatible with MODELLER, each residue in the
    ATOM-section has to match the corresponding position in the PIR alignment.
"""
# glob the mmCif files from given directory and map the PDB identifier to the path
cif_files = glob.glob(os.path.join(folder, '*.cif'))
cif_paths = { path.split('/')[-1].split('.')[0].upper() : path for path in cif_files }
cif_edits = dict()
# create the path where renumbered cifs are saved to
if not os.path.exists(output_path):
os.mkdir(output_path)
    # if the cif does not contain any residue of the pir alignment we delete it
del_row = list()
for row in range(template_grid.get_grid_height()):
# get the pdb code and strand id from the current template
pdb_code = template_grid._pdb_code[row]
        chain = template_grid._chain[row] # hhr uses the pdb chain ID
# load mmCif file accordingly
if pdb_code in cif_edits.keys():
block = cif_edits[pdb_code]
else:
try:
block = open_cif(cif_paths[pdb_code])
except KeyError:
del_row.append(row)
print ('! Did not find the mmCIF file for {pdb}. Removing it from the alignment.'.format(
pdb = pdb_code))
continue
# Create a mapping of the atom site
atom_site = block.getObj('atom_site')
########################################################################
## Get the mapping of the residues in the atom section ##
########################################################################
cif_seq = dict()
# For the case that we have to rename a chain
cif_chains = set([])
# Iterate through the atomsection of the cif file
for atom_row in range(0, atom_site.getRowCount()):
try:
if atom_site.getValue('label_comp_id', atom_row) == 'HOH':
continue
cif_chain = atom_site.getValue('label_asym_id', atom_row)
pdb_chain = atom_site.getValue('auth_asym_id', atom_row) # use PDB chain ID
except IndexError:
pass
cif_chains.add(cif_chain)
# We do not care about the residues apart from the chain
#if cif_chain != chain: # hzhu
if pdb_chain != chain: # hhr uses PDB chain, not the cif chain! hzhu
continue
# and update the chain id from pdb_chain to cif_chain
if atom_site.getValue('group_PDB', atom_row).startswith('ATOM'): # hzhu in case HETATM ruins ch id
template_grid._chain[row] = cif_chain
# get the residue and the residue number
try:
res_num = int(atom_site.getValue("label_seq_id", atom_row))
except ValueError:
continue
residue = atom_site.getValue('label_comp_id', atom_row)
residue = convert_aa_code(residue, convert)
if res_num not in cif_seq.keys():
cif_seq[res_num] = residue
elif res_num in cif_seq.keys() and cif_seq[res_num] == residue:
continue
elif res_num in cif_seq.keys() and cif_seq[res_num] != residue:
cif_seq[res_num] = '-'
if DEBUG_MODE:
                        print ('! {p} {c}: mmCIF residue position {cr} is assigned to two residues. Removing it.'.format(
p = pdb_code,
c = chain,
cr = res_num))
########################################################################
## Rename chain if necessary ##
########################################################################
chain_idx = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if len(template_grid._chain[row]) != 1:
i = 0
new_chain = 0
while i < len(chain_idx):
if chain_idx[i] in cif_chains:
if DEBUG_MODE:
print ('! {p} {c}: Chain identifier {i} is already taken.'.format(
p = pdb_code,
c = chain,
i = chain_idx[i]))
i += 1
else:
new_chain = chain_idx[i]
break
if new_chain == 0:
if DEBUG_MODE:
                    print ('! {p} {c}: Could not use {p}. The chain identifier {c} is not compatible with MODELLER (2 letters) and could not be renamed.'.format(
p = pdb_code,
c = chain))
del_row.append(row)
continue
if new_chain != 0:
print ('Selected new chain name {c}'.format(c = new_chain))
#TODO
########################################################################
## Compare cif positions with the atom positions ##
########################################################################
del_pos = list()
mod_pos = dict()
mapping = dict()
for pos_cif, pos_tem in zip(range(template_grid.get_template_start(row),
template_grid.get_template_end(row) + 1), template_grid.get_seq_indeces(row)):
res_tem = template_grid.get_cell(row, pos_tem)
try:
res_cif = cif_seq[pos_cif]
except KeyError:
res_cif = -1
            match = res_tem == res_cif
if not match:
                # one-letter equivalents for the non-canonical ids returned by
                # convert_aa_code() (same ids as the `non_canonical` table
                # above); a mismatch is accepted when the template residue is
                # the canonical equivalent of the non-canonical cif residue
                non_canonical_one_letter = {
                    1: 'M', 2: 'P', 3: 'K', 4: 'S', 5: 'T', 6: 'C', 7: 'Y',
                    8: 'K', 9: 'C', 10: 'A', 11: 'C', 12: 'L', 13: 'A',
                    14: 'E', 15: 'L', 16: 'M', 17: 'V', 18: 'C', 19: 'P',
                    20: 'V', 21: 'Y', 22: 'K', 23: 'C', 24: 'K', 25: 'C',
                    26: 'C', 27: 'L', 28: 'E', 29: 'S', 30: 'C', 31: 'K',
                    32: 'K', 33: 'F', 34: 'R', 35: 'F', 36: 'D', 37: 'D',
                    38: 'H', 39: 'P', 40: 'T', 41: 'I', 42: 'N', 43: 'Y',
                    44: 'M', 45: 'G', 46: 'W', 47: 'S', 48: 'N', 49: 'M',
                    50: 'A', 51: 'Y', 52: 'P', 53: 'V', 54: 'L', 55: 'Y',
                    56: 'E', 57: 'F', 58: 'C', 59: 'H', 60: 'C', 61: 'Y',
                    62: 'C', 63: 'M', 64: 'M', 65: 'C', 66: 'Y', 67: 'S',
                    68: 'C', 69: 'L', 70: 'M', 71: 'K'
                }
                if res_cif in non_canonical_one_letter and \
                        res_tem == non_canonical_one_letter[res_cif]:
                    mod_pos[pos_cif] = res_cif
                    mapping[(pos_tem, res_tem)] = (pos_cif, res_tem)
else:
# insert a gap
template_grid.set_empty(row, pos_tem)
mapping[(pos_tem, res_tem)] = (pos_cif, res_cif)
if DEBUG_MODE:
print ('! {p} {c}: template pos {pt} ({rt}) does not match cif pos {pc} ({rc}). Replacing with gap.'.format(
p = pdb_code,
c = chain,
pt = pos_tem,
rt = res_tem,
pc = pos_cif,
rc = res_cif if res_cif != -1 else 'DNE'))
if res_cif != -1:
del_pos.append(pos_cif)
else:
mapping[(pos_tem, res_tem)] = (pos_cif, res_cif)
# adjust template start and end positions
correct_mapping = { key:value for key, value in mapping.items() if key[1] == value[1] }
try:
tstart = correct_mapping[sorted(correct_mapping.keys())[0]][0]
tend = correct_mapping[sorted(correct_mapping.keys())[-1]][0]
template_grid.set_map(row, tstart, tend)
except IndexError:
# This exception handles cases in which all residues were deleted
if DEBUG_MODE:
print ('! {p} {c}: Removing {p} from alignment. No residues matched the alignment sequence.'.format(
p = pdb_code,
c = chain))
del_row.append(row)
continue
########################################################################
        ## Delete rows from the PIR Alignment if the residue ratio is too low ##
########################################################################
if threshold > 0:
gaps = 0
res = 0
for col in range(template_grid.get_grid_width()):
if template_grid.is_empty(row, col):
template_grid.set_gap(row, col)
if template_grid.is_gap(row, col):
gaps += 1
else:
res += 1
ratio = res/float(gaps + res)
if ratio > threshold:
                print ('! Template {p} passed the residue ratio check ({r:.2f} / {t}).'.format(
                    p = pdb_code,
                    r = ratio,
                    t = threshold ))
            else:
                print ('! Template {p} did not pass the residue ratio check ({r:.2f} / {t}). Removing it from the pir alignment.'.format(
p = pdb_code,
r = ratio,
t = threshold ))
if row not in del_row:
del_row.append(row)
continue
########################################################################
## Edit cif files ##
########################################################################
rem_row = list() # verbosity: saves information about removed residues
mod_row = list() # verbosity: saves information about modified residues
cha_row = list() # verbosity: saves any other changes
for atom_row in reversed(range(0, atom_site.getRowCount())):
try:
cif_chain = atom_site.getValue('label_asym_id', atom_row)
except IndexError:
pass
# We do not care about the residues apart from the chain
if cif_chain != chain:
continue
# get the residue number
try:
res_num = int(atom_site.getValue("label_seq_id", atom_row))
except ValueError:
continue
            # pdbx_PDB_model_num has to be set to 1
try:
model_num = int(atom_site.getValue('pdbx_PDB_model_num', atom_row))
except IndexError:
model_num = 1 # if we cannot extract, assume that it is alright
try:
ins_code = atom_site.getValue('pdbx_PDB_ins_code', atom_row)
except IndexError:
ins_code = '?' # assume it has no insertion code
group_PDB = atom_site.getValue('group_PDB', atom_row)
residue = atom_site.getValue('label_comp_id', atom_row)
residue = convert_aa_code(residue, convert)
            # MODELLER only accepts structures whose pdbx_PDB_model_num is set to 1
if model_num != 1:
if (res_num, residue, 'model_num') not in cha_row:
cha_row.append((res_num, residue, 'model_num'))
atom_site.setValue(1, "pdbx_PDB_model_num", atom_row)
if ins_code != '?':
if (res_num, residue, 'ins_code') not in cha_row:
cha_row.append((res_num, residue, 'ins_code'))
atom_site.setValue('?', "pdbx_PDB_ins_code", atom_row)
if group_PDB != 'ATOM':
if (res_num, residue, 'group_PDB') not in cha_row:
cha_row.append((res_num, residue, 'group_PDB'))
atom_site.setValue('ATOM', 'group_PDB', atom_row)
########################################################################
## Delete residues ##
########################################################################
if res_num in del_pos:
if (res_num, residue) not in rem_row:
rem_row.append((res_num, residue))
atom_site.removeRow(atom_row)
########################################################################
## Modify residues ##
########################################################################
if res_num in mod_pos.keys():
# Get the data
type_symbol = atom_site.getValue('type_symbol', atom_row)
label_atom_id = atom_site.getValue('label_atom_id', atom_row)
auth_atom_id = atom_site.getValue('auth_atom_id', atom_row)
                # (source, target) three-letter codes for every supported
                # modification, keyed by the same ids as the `non_canonical`
                # table; e.g. 1 converts selenomethionine (MSE) to MET
                conversions = {
                    1: ('MSE', 'MET'), 2: ('HYP', 'PRO'), 3: ('MLY', 'LYS'),
                    4: ('SEP', 'SER'), 5: ('TPO', 'THR'), 6: ('CSO', 'CYS'),
                    7: ('PTR', 'TYR'), 8: ('KCX', 'LYS'), 9: ('CME', 'CYS'),
                    10: ('CSD', 'ALA'), 11: ('CAS', 'CYS'), 12: ('MLE', 'LEU'),
                    13: ('DAL', 'ALA'), 14: ('CGU', 'GLU'), 15: ('DLE', 'LEU'),
                    16: ('FME', 'MET'), 17: ('DVA', 'VAL'), 18: ('OCS', 'CYS'),
                    19: ('DPR', 'PRO'), 20: ('MVA', 'VAL'), 21: ('TYS', 'TYR'),
                    22: ('M3L', 'LYS'), 23: ('SMC', 'CYS'), 24: ('ALY', 'LYS'),
                    25: ('CSX', 'CYS'), 26: ('DCY', 'CYS'), 27: ('NLE', 'LEU'),
                    28: ('DGL', 'GLU'), 29: ('DSN', 'SER'), 30: ('CSS', 'CYS'),
                    31: ('DLY', 'LYS'), 32: ('MLZ', 'LYS'), 33: ('DPN', 'PHE'),
                    34: ('DAR', 'ARG'), 35: ('PHI', 'PHE'), 36: ('IAS', 'ASP'),
                    37: ('DAS', 'ASP'), 38: ('HIC', 'HIS'), 39: ('MP8', 'PRO'),
                    40: ('DTH', 'THR'), 41: ('DIL', 'ILE'), 42: ('MEN', 'ASN'),
                    43: ('DTY', 'TYR'), 44: ('CXM', 'MET'), 45: ('DGN', 'GLN'),
                    46: ('DTR', 'TRP'), 47: ('SAC', 'SER'), 48: ('DSG', 'ASN'),
                    49: ('MME', 'MET'), 50: ('MAA', 'ALA'), 51: ('YOF', 'TYR'),
                    52: ('FP9', 'PRO'), 53: ('FVA', 'VAL'), 54: ('MLU', 'LEU'),
                    55: ('OMY', 'TYR'), 56: ('FGA', 'GLU'), 57: ('MEA', 'PHE'),
                    58: ('CMH', 'CYS'), 59: ('DHI', 'HIS'), 60: ('SEC', 'CYS'),
                    61: ('OMZ', 'TYR'), 62: ('SCY', 'CYS'), 63: ('MHO', 'MET'),
                    64: ('MED', 'MET'), 65: ('CAF', 'CYS'), 66: ('NIY', 'TYR'),
                    67: ('OAS', 'SER'), 68: ('SCH', 'CYS'), 69: ('MK8', 'LEU'),
                    70: ('SME', 'MET'), 71: ('LYZ', 'LYS')
                }
                src, dst = conversions[mod_pos[res_num]]
                atom_site.setValue(dst, 'label_comp_id', atom_row)
                try:
                    atom_site.setValue(dst, 'auth_comp_id', atom_row)
                except IndexError:
                    pass
                if mod_pos[res_num] == 1:
                    # MSE -> MET additionally renames the selenium atom to sulfur
                    if type_symbol == 'SE':
                        atom_site.setValue('S', 'type_symbol', atom_row)
                    if label_atom_id == 'SE':
                        atom_site.setValue('S', 'label_atom_id', atom_row)
                    if auth_atom_id == 'SE':
                        atom_site.setValue('S', 'auth_atom_id', atom_row)
                if (res_num, residue, '{} -> {}'.format(src, dst)) not in mod_row:
                    mod_row.append((res_num, residue, '{} -> {}'.format(src, dst)))
########################################################################
## Notify user about modification made to cif data ##
########################################################################
if DEBUG_MODE:
mod_model_num = len([ msg for msg in cha_row if msg[2] == 'model_num' ])
mod_ins_code = len([ msg for msg in cha_row if msg[2] == 'ins_code' ])
mod_group_PDB = len([ msg for msg in cha_row if msg[2] == 'group_PDB' ])
if mod_model_num != 0:
print ('! {p} {c}: modified atom_site.pdbx_PDB_model_num for {cr} residues to 1.'.format(
p = pdb_code,
c = chain,
cr = mod_model_num))
if mod_ins_code != 0:
print ('! {p} {c}: modified atom_site.pdbx_PDB_ins_code for {cr} residues to "?".'.format(
p = pdb_code,
c = chain,
cr = mod_ins_code))
if mod_group_PDB != 0:
print ('! {p} {c}: modified atom_site.group_PDB for {cr} residues to "ATOM".'.format(
p = pdb_code,
c = chain,
cr = mod_group_PDB))
        for residue in reversed(mod_row):
            print ('! {p} {c}: modified cif pos {cr} ({nr}).'.format(
                p = pdb_code,
                c = chain,
                cr = residue[0],
                nr = residue[2]))
for residue in reversed(rem_row):
print ('! {p} {c}: removed cif pos {cr} ({ca})'.format(
p = pdb_code,
c = chain,
cr = residue[0],
ca = residue[1]))
cif_edits[pdb_code] = block
    # write modified cif files to disk
    for pdb_code in cif_edits:
        with open(os.path.join(output_path, pdb_code + '.cif'), 'w') as out:
            writer = PdbxWriter(out)
            writer.writeContainer(cif_edits[pdb_code])
# Delete missing entries from the last template sequence to the first
for row in reversed(del_row):
template_grid.del_row(row)
return template_grid
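# A hedged refactoring sketch (not part of the original script): the long
# elif chain over mod_pos in compare_with_cifs maps modified residues onto
# their canonical parents and could be driven by a single lookup table, e.g.
#
#   CANONICAL = {'MLU': 'LEU', 'OMY': 'TYR', 'FGA': 'GLU', 'MEA': 'PHE',
#                'CMH': 'CYS', 'DHI': 'HIS', 'SEC': 'CYS', 'OMZ': 'TYR',
#                'SCY': 'CYS', 'MHO': 'MET', 'MED': 'MET', 'CAF': 'CYS',
#                'NIY': 'TYR', 'OAS': 'SER', 'SCH': 'CYS', 'MK8': 'LEU',
#                'SME': 'MET', 'LYZ': 'LYS'}
#
#   target = CANONICAL[residue]
#   atom_site.setValue(target, 'label_comp_id', atom_row)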
def remove_self_alignment(template_grid, query_name):
""" Removes a self alignment from the final pir alignment to prevent clashes with MODELLER """
to_delete = list()
for row in range(template_grid.get_grid_height()):
if template_grid._pdb_code[row] == query_name:
to_delete.append(row)
for row in reversed(to_delete):
template_grid.del_row(row)
return True
def write_to_file(line_list, fname):
""" Writes the final pir file """
with open(fname, 'w+') as fout:
for line in line_list:
fout.write(line + "\n")
def arg():
import argparse
description = """Creates a MODELLER alignment (*.pir) from a HHSearch results file (*.hhr)."""
epilog= '2016 Harald Voehringer.'
# Initiate a ArgumentParser Class
parser = argparse.ArgumentParser(description = description, epilog = epilog)
# Call add_options to the parser
parser.add_argument('input', help = 'results file from HHsearch with hit list and alignment', metavar = 'FILE')
parser.add_argument('cifs', help = 'path to the folder containing cif files', metavar = 'DIR')
parser.add_argument('pir', help = 'output file (PIR-formatted multiple alignment)', metavar = 'FILE')
parser.add_argument('output', help = 'path to the folder where modified cif files should be written to', metavar = 'DIR')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'verbose mode')
parser.add_argument('-m', nargs = '+', help = 'pick hits with specified indices (e.g. -m 2 5)', metavar = 'INT')
parser.add_argument('-e', type = float, help = 'maximum E-Value threshold (e.g. -e 0.001)', metavar = 'FLOAT')
    parser.add_argument('-r', type = float, help = 'residue ratio (keep only alignments that contribute at least the specified ratio of residues).',
        default = 0, metavar = 'FLOAT')
parser.add_argument('-c', help = 'convert non-canonical residues (default = True)', action = 'store_true', default = True)
return parser
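# Hypothetical invocation (script name and paths are placeholders, not taken
# from the original source): keep all hits with E-value <= 0.001, write the
# PIR alignment and the edited cif files, and print verbose diagnostics:
#
#   python hhr2pir.py query.hhr ./pdb_cifs query.pir ./cif_out -e 0.001 -v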
def main():
import sys
parser = arg()
args = parser.parse_args(sys.argv[1:])
global DEBUG_MODE
if args.verbose:
DEBUG_MODE = True
query_name, query_chain = get_query_name(args.input)
data = read_result(args.input)
selected_templates = list()
if args.m and not args.e:
selection = map(lambda x: int(x), args.m)
print ('Selected templates {st}.'.format(st = ', '.join(args.m)))
for i in selection:
tmp_info = str(data[i - 1].template_info.split('>')[1])
print ('{i}: {t}'.format(
i = i,
t = tmp_info[0:80]))
selected_templates.append(data[i - 1])
elif args.e and not args.m:
print ('Selected templates satisfying E-val <= {e}'.format(e = args.e))
e_values = { float(j.evalue):i for i, j in enumerate(data) }
        selection = sorted([ val for key, val in e_values.items() if key <= args.e ])
        for i in selection:
            # selection holds 0-based indices from enumerate, so no -1 shift here
            tmp_info = str(data[i].template_info.split('>')[1])
            print ('{i}: {t}'.format(
                i = i + 1,
                t = tmp_info[0:80]))
            selected_templates.append(data[i])
elif args.m and args.e:
        print ('! Please do not use options -m and -e at the same time. Exiting.')
sys.exit()
else:
selected_templates = data
print ('Creating pir file using all templates ({n})'.format(
n = len(selected_templates)))
query_grid = create_query_grid(selected_templates) # load query grid
print ('query_grid')
print(query_grid)
gapless_query_grid = create_gapless_grid(query_grid) # remove gaps
print ('gapless_query_grid')
print(gapless_query_grid)
processed_query_grid = process_query_grid(query_grid, gapless_query_grid) # insert gaps
##processed_query_grid = process_query_grid(query_grid, query_grid) # insert gaps
print ('processed_query_grid')
print (processed_query_grid)
glob_seq = derive_global_seq(processed_query_grid, query_name, query_chain) # derive query sequence
template_grid = create_template_grid(selected_templates) # create template grid
print ('template_grid')
print (template_grid)
    processed_template_grid = process_template_grid(query_grid, template_grid) # insert gaps into template sequences
    print ('processed_query_grid')
    print (processed_query_grid)
    print ('processed_template_grid')
    print (processed_template_grid)
final_grid = compare_with_cifs(processed_template_grid, args.cifs, args.output, args.c, args.r) # compare with atom section of cifs
remove_self_alignment(final_grid, query_name) # remove self alignment if any
write_to_file([glob_seq, final_grid.display()], args.pir)
if __name__ == "__main__":
main()
|
hiroakis/ansible
|
refs/heads/devel
|
test/units/executor/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
Maccimo/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/notSuggestedForComprehensionTarget.py
|
19
|
[v<caret>ar for var in range(10)]
|
romankagan/DDBWorkbench
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/multifile/target.append.py
|
83
|
# existing module
A = 1
class Suppa:
def foo(self):
print "bar"
|
nullx002/pychess
|
refs/heads/master
|
lib/pychess/Utils/lutils/strateval.py
|
20
|
"""
This module differs from leval in that it is not optimized for speed.
It checks differences between the last and the current board, and returns
not scores but strings describing the differences.
Can be used for commenting on board changes.
"""
from __future__ import absolute_import
from .ldata import *
from pychess.Utils.const import *
from pychess.Utils.lutils.attack import staticExchangeEvaluate, getAttacks, \
defends
from pychess.Utils.lutils.lmove import TCORD, FCORD, FLAG, PROMOTE_PIECE, toSAN
from pychess.Utils.lutils.lmovegen import genCaptures, genAllMoves, newMove
from pychess.Utils.lutils.validator import validateMove
from pychess.Utils.repr import reprColor, reprPiece
from . import leval
def join(items):
if len(items) == 1:
return items[0]
else:
s = "%s %s %s" % (items[-2], _("and"), items[-1])
if len(items) > 2:
s = ", ".join(items[:-2]+[s])
return s
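# e.g. join(["a2", "b3", "c4"]) returns "a2, b3 and c4" (with the default
# English catalogue for _("and")); an illustrative note, not original code.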
#
# Functions can be of types:
# * Final: Will be shown alone: "mates", "draws"
# * Moves (s): Will always be shown: "put into *"
# * Prefix: Will always be shown: "castles", "promotes"
# * Attack: Will always be shown: "threatens", "pressures", "defends"
# * Simple: (s) Max one will be shown: "develops", "activity"
# * State: (s) Will always be shown: "new *"
# * Tip: (s) Will sometimes be shown: "pawn storm", "cramped position"
#
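# Illustrative consumption of the generators below (hedged; the real caller
# lives elsewhere in pychess):
#
#   for msg in offencive_moves_check(model, model.ply, phase):
#       handle(msg)   # e.g. "puts opponent in check"; handle() is a
#                     # hypothetical sink, not a pychess function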
def final_status (model, ply, phase):
if ply == model.ply:
if model.status == DRAW:
yield _("draws")
elif model.status in (WHITEWON,BLACKWON):
yield _("mates")
def offencive_moves_check (model, ply, phase):
if model.getBoardAtPly(ply).board.isChecked():
yield _("puts opponent in check")
def defencive_moves_safety (model, ply, phase):
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
if board.arBoard[TCORD(model.getMoveAtPly(ply-1).move)] != KING:
return
color = oldboard.color
opcolor = 1-color
delta_eval_king = leval.evalKing(board, color, phase) - \
leval.evalKing(oldboard, color, phase)
# PyChess points tropism to queen for phase <= 3. Thus we set a high phase
delta_eval_tropism = leval.evalKingTropism(board, opcolor, 10) - \
leval.evalKingTropism(oldboard, opcolor, 10)
    # Notice that tropism was negative
delta_score = delta_eval_king - delta_eval_tropism/2
if delta_score > 35:
yield _("improves king safety")
elif delta_score > 15:
yield _("slightly improves king safety")
def offencive_moves_rook (model, ply, phase):
move = model.getMoveAtPly(ply-1).move
fcord = FCORD(move)
tcord = TCORD(move)
board = model.getBoardAtPly(ply).board
color = 1-board.color
opcolor = 1-color
# We also detect rook-to-open castlings
if board.arBoard[tcord] == KING:
if FLAG(move) == QUEEN_CASTLE:
fcord = board.ini_rooks[color][0]
tcord = tcord+1
elif FLAG(move) == KING_CASTLE:
fcord = board.ini_rooks[color][1]
tcord = tcord-1
if board.arBoard[tcord] != ROOK:
return
color = 1-board.color
opcolor = 1-color
pawns = board.boards[color][PAWN]
oppawns = board.boards[opcolor][PAWN]
ffile = fileBits[FILE(FCORD(move))]
tfile = fileBits[FILE(tcord)]
if ffile & pawns and not tfile & pawns and bin(pawns).count("1") >= 3:
if not tfile & oppawns:
yield _("moves a rook to an open file")
else: yield _("moves an rook to a half-open file")
def offencive_moves_fianchetto (model, ply, phase):
board = model.getBoardAtPly(ply).board
tcord = TCORD(model.getMoveAtPly(ply-1).move)
movingcolor = 1-board.color
if movingcolor == WHITE:
if board.castling & W_OO and tcord == G2:
yield _("moves bishop into fianchetto: %s") % "g2"
elif board.castling & W_OOO and tcord == B2:
yield _("moves bishop into fianchetto: %s") % "b2"
else:
if board.castling & B_OO and tcord == G7:
yield _("moves bishop into fianchetto: %s") % "g7"
elif board.castling & B_OOO and tcord == B7:
yield _("moves bishop into fianchetto: %s") % "b7"
def prefix_type (model, ply, phase):
flag = FLAG(model.getMoveAtPly(ply-1).move)
if flag in PROMOTIONS:
yield _("promotes a Pawn to a %s") % reprPiece[PROMOTE_PIECE(flag)]
elif flag in (KING_CASTLE, QUEEN_CASTLE):
yield _("castles")
def attack_type (model, ply, phase):
# We set bishop value down to knight value, as it is what most people expect
bishopBackup = PIECE_VALUES[BISHOP]
PIECE_VALUES[BISHOP] = PIECE_VALUES[KNIGHT]
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
if ply - model.lowply >= 2:
oldmove = model.getMoveAtPly(ply-2).move
oldboard3 = model.getBoardAtPly(ply-2).board
else: oldmove = None
move = model.getMoveAtPly(ply-1).move
tcord = TCORD(move)
if oldboard.arBoard[tcord] != EMPTY:
if not (board.variant == FISCHERRANDOMCHESS and \
FLAG(move) in (KING_CASTLE, QUEEN_CASTLE)):
if oldmove and oldboard3.arBoard[TCORD(oldmove)] != EMPTY and \
TCORD(oldmove) == tcord:
yield _("takes back material")
else:
see = staticExchangeEvaluate(oldboard, move)
if see < 0:
yield _("sacrifies material")
elif see == 0:
yield _("exchanges material")
elif see > 0:
yield _("captures material")
PIECE_VALUES[BISHOP] = bishopBackup
def defencive_moves_tactic (model, ply, phase):
# ------------------------------------------------------------------------ #
# Test if we threat something, or at least put more pressure on it #
# ------------------------------------------------------------------------ #
# We set bishop value down to knight value, as it is what most people expect
bishopBackup = PIECE_VALUES[BISHOP]
PIECE_VALUES[BISHOP] = PIECE_VALUES[KNIGHT]
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
move = model.getMoveAtPly(ply-1).move
fcord = FCORD(move)
tcord = TCORD(move)
piece = board.arBoard[tcord]
found_threatens = []
found_increases = []
# What do we attack now?
board.setColor(1-board.color)
for ncap in genCaptures(board):
        # genCaptures also generates promotions
if FLAG(ncap) in PROMOTIONS:
continue
# We are only interested in the attacks of the piece we just moved
if FCORD(ncap) != TCORD (move):
continue
# We don't want to move back
if TCORD(ncap) == FCORD(move):
continue
        # We don't threaten the king. We check him! (in another function)
if board.arBoard[TCORD(ncap)] == KING:
continue
        # If we were also able to attack that cord last time, we don't care
if validateMove(oldboard, newMove(FCORD(move), TCORD(ncap))):
continue
        # Test if we threaten our enemy, at least more than before
see0 = staticExchangeEvaluate(oldboard, TCORD(ncap), 1-oldboard.color)
see1 = staticExchangeEvaluate(board, TCORD(ncap), 1-oldboard.color)
if see1 > see0:
# If a new winning capture has been created
if see1 > 0:
# Find the easiest attack
attacks = getAttacks (board, TCORD(ncap), board.color)
v, cord = min((PIECE_VALUES[board.arBoard[fc]],fc)
for fc in iterBits(attacks))
easiestAttack = newMove(cord, TCORD(ncap))
found_threatens.append(toSAN(board,easiestAttack, True))
# Even though we might not yet be strong enough, we might still
# have strengthened another friendly attack
else:
found_increases.append(reprCord[TCORD(ncap)])
board.setColor(1-board.color)
# -------------------------------------------------------------------- #
    # Test if we defend one of our pieces                                    #
# -------------------------------------------------------------------- #
found_defends = []
# Test which pieces were under attack
used = []
for ncap in genCaptures(board):
# getCaptures also generate promotions
if FLAG(ncap) in PROMOTIONS:
continue
# We don't want to know about the same cord more than once
if TCORD(ncap) in used:
continue
used.append(TCORD(ncap))
        # If the attack was pointing at the piece we just moved, we ignore it
if TCORD(ncap) == FCORD(move) or TCORD(ncap) == TCORD(move):
continue
# If we were already defending the piece, we don't send a new
# message
if defends(oldboard, FCORD(move), TCORD(ncap)):
continue
# If the attack was not strong, we ignore it
see = staticExchangeEvaluate(oldboard, ncap)
if see < 0: continue
v = defends(board, TCORD(move), TCORD(ncap))
        # If the defence didn't help, it doesn't matter. Like defending a
# bishop, threatened by a pawn, with a queen.
# But on the other hand - it might still be a defend...
# newsee = staticExchangeEvaluate(board, ncap)
# if newsee <= see: continue
if v:
found_defends.append(reprCord[TCORD(ncap)])
# ------------------------------------------------------------------------ #
# Test if we are rescuing an otherwise exposed piece #
# ------------------------------------------------------------------------ #
# Rescuing is only an option, if our own move wasn't an attack
if oldboard.arBoard[tcord] == EMPTY:
see0 = staticExchangeEvaluate(oldboard, fcord, oldboard.color)
see1 = staticExchangeEvaluate(board, tcord, oldboard.color)
if see1 > see0 and see1 > 0:
yield _("rescues a %s") % reprPiece[board.arBoard[tcord]].lower()
if found_threatens:
yield _("threatens to win material by %s") % join(found_threatens)
if found_increases:
yield _("increases the pressure on %s") % join(found_increases)
if found_defends:
yield _("defends %s") % join(found_defends)
PIECE_VALUES[BISHOP] = bishopBackup
def offencive_moves_pin (model, ply, phase):
board = model.getBoardAtPly(ply).board
move = model.getMoveAtPly(ply-1).move
fcord = FCORD(move)
tcord = TCORD(move)
piece = board.arBoard[tcord]
ray = 0
if piece in (BISHOP, QUEEN):
ray |= (ray45[tcord] | ray135[tcord]) & ~(ray45[fcord] | ray135[fcord])
if piece in (ROOK, QUEEN):
ray |= (ray00[tcord] | ray90[tcord]) & ~(ray00[fcord] | ray90[fcord])
if ray:
for c in iterBits(ray & board.friends[board.color]):
            # We don't pin on pieces that are worth less than us
if not PIECE_VALUES[piece] < PIECE_VALUES[board.arBoard[c]]:
continue
# There should be zero friendly pieces in between
ray = fromToRay[tcord][c]
if ray & board.friends[1-board.color]:
continue
# There should be exactly one opponent piece in between
op = clearBit(ray & board.friends[board.color], c)
if bin(op).count("1") != 1:
continue
# The king can't be pinned
pinned = lastBit(op)
oppiece = board.arBoard[pinned]
if oppiece == KING:
continue
# Yield
yield _("pins an enemy %(oppiece)s on the %(piece)s at %(cord)s") % {
'oppiece': reprPiece[oppiece].lower(),
'piece': reprPiece[board.arBoard[c]].lower(),
'cord': reprCord[c]}
def state_outpost (model, ply, phase):
if phase >= 6:
# Doesn't make sense in endgame
return
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
color = 1-board.color
opcolor = 1-color
wpawns = board.boards[WHITE][PAWN]
oldwpawns = oldboard.boards[WHITE][PAWN]
bpawns = board.boards[BLACK][PAWN]
oldbpawns = oldboard.boards[BLACK][PAWN]
wpieces = board.boards[WHITE][BISHOP] | board.boards[WHITE][KNIGHT]
oldwpieces = oldboard.boards[WHITE][BISHOP] | oldboard.boards[WHITE][KNIGHT]
bpieces = board.boards[BLACK][BISHOP] | board.boards[BLACK][KNIGHT]
oldbpieces = oldboard.boards[BLACK][BISHOP] | oldboard.boards[BLACK][KNIGHT]
for cord in iterBits(wpieces):
sides = isolaniMask[FILE(cord)]
front = passedPawnMask[WHITE][cord]
if outpost[WHITE][cord] and not bpawns & sides & front and \
(not oldwpieces & bitPosArray[cord] or \
oldbpawns & sides & front):
yield 35, _("White has a new piece in outpost: %s") % reprCord[cord]
for cord in iterBits(bpieces):
sides = isolaniMask[FILE(cord)]
front = passedPawnMask[BLACK][cord]
if outpost[BLACK][cord] and not wpawns & sides & front and \
(not oldbpieces & bitPosArray[cord] or \
oldwpawns & sides & front):
yield 35, _("Black has a new piece in outpost: %s") % reprCord[cord]
def state_pawn (model, ply, phase):
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
color = 1-board.color
opcolor = 1-color
move = model.getMoveAtPly(ply-1).move
pawns = board.boards[color][PAWN]
oppawns = board.boards[opcolor][PAWN]
oldpawns = oldboard.boards[color][PAWN]
oldoppawns = oldboard.boards[opcolor][PAWN]
# Passed pawns
for cord in iterBits(pawns):
if not oppawns & passedPawnMask[color][cord]:
if color == WHITE:
frontCords = fromToRay[cord][cord|56]
else: frontCords = fromToRay[cord][cord&7]
if frontCords & pawns:
continue
# Was this a passed pawn before?
if oldpawns & bitPosArray[cord] and \
not oldoppawns & passedPawnMask[color][cord] and \
not frontCords & oldpawns:
continue
# Is this just a passed pawn that has been moved?
if TCORD(move) == cord:
frontCords |= bitPosArray[cord]
if not frontCords & oldpawns and \
not oldoppawns & passedPawnMask[color][FCORD(move)]:
continue
score = (passedScores[color][cord>>3] * phase)
yield score, _("%(color)s has a new passed pawn on %(cord)s") % {
'color': reprColor[color], 'cord': reprCord[cord]}
# Double pawns
found_doubles = []
found_halfopen_doubles = []
found_white_isolates = []
found_black_isolates = []
for file in range(8):
bits = fileBits[file]
count = bin(pawns & bits).count("1")
oldcount = bin(oldpawns & bits).count("1")
opcount = bin(oppawns & bits).count("1")
oldopcount = bin(oldoppawns & bits).count("1")
# Single pawn -> double pawns
if count > oldcount >= 1:
if not opcount:
found_halfopen_doubles.append(reprFile[file])
else: found_doubles.append(reprFile[file])
# Closed file double pawn -> half-open file double pawn
elif count > 1 and opcount == 0 and oldopcount > 0:
found_halfopen_doubles.append(reprFile[file])
# Isolated pawns
if color == WHITE:
wpawns = pawns
oldwpawns = oldpawns
bpawns = oppawns
oldbpawns = oldoppawns
else:
bpawns = pawns
oldbpawns = oldpawns
wpawns = oppawns
oldwpawns = oldoppawns
if wpawns & bits and not wpawns & isolaniMask[file] and \
(not oldwpawns & bits or oldwpawns & isolaniMask[file]):
found_white_isolates.append(reprFile[file])
if bpawns & bits and not bpawns & isolaniMask[file] and \
(not oldbpawns & bits or oldbpawns & isolaniMask[file]):
found_black_isolates.append(reprFile[file])
    # We need to take care of worst cases like: "got new double pawns in the a
    # file, in the half-open b, c and d files and in the open e and f files"
doubles_count = len(found_doubles) + len(found_halfopen_doubles)
if doubles_count > 0:
parts = []
for type_, list_ in (("", found_doubles),
(_("half-open")+" ", found_halfopen_doubles)):
if len(list_) == 1:
parts.append(_("in the %(x)s%(y)s file") % {'x': type_, 'y': list_[0]})
elif len(list_) >= 2:
parts.append(_("in the %(x)s%(y)s files") % {'x': type_, 'y': join(list_)})
if doubles_count == 1:
s = _("%(color)s got a double pawn %(place)s")
else: s = _("%(color)s got new double pawns %(place)s")
yield (8+phase)*2*doubles_count, s % {'color': reprColor[color], 'place': join(parts)}
for (color_, list_) in ((WHITE, found_white_isolates),
(BLACK, found_black_isolates)):
if list_:
yield 20*len(list_), ngettext("%(color)s got an isolated pawn in the %(x)s file",
"%(color)s got isolated pawns in the %(x)s files",
len(list_)) % {'color': reprColor[color_], 'x': join(list_)}
# Stone wall
if stonewall[color] & pawns == stonewall[color] and \
stonewall[color] & oldpawns != stonewall[color]:
yield 10, _("%s moves pawns into stonewall formation") % reprColor[color]
def state_destroysCastling (model, ply, phase):
""" Does the move destroy the castling ability of the opponent """
    # If the move is a castling, nobody will ever care if the castling
    # possibilities have changed
if FLAG(model.getMoveAtPly(ply-1).move) in (QUEEN_CASTLE, KING_CASTLE):
return
oldcastling = model.getBoardAtPly(ply-1).board.castling
castling = model.getBoardAtPly(ply).board.castling
if oldcastling & W_OOO and not castling & W_OOO:
if oldcastling & W_OO and not castling & W_OO:
yield 900/phase, _("%s can no longer castle") % reprColor[WHITE]
        else: yield 400/phase, _("%s can no longer castle on the queenside") % reprColor[WHITE]
elif oldcastling & W_OO and not castling & W_OO:
        yield 500/phase, _("%s can no longer castle on the kingside") % reprColor[WHITE]
if oldcastling & B_OOO and not castling & B_OOO:
if oldcastling & B_OO and not castling & B_OO:
yield 900/phase, _("%s can no longer castle") % reprColor[BLACK]
        else: yield 400/phase, _("%s can no longer castle on the queenside") % reprColor[BLACK]
elif oldcastling & B_OO and not castling & B_OO:
        yield 500/phase, _("%s can no longer castle on the kingside") % reprColor[BLACK]
def state_trappedBishops (model, ply, phase):
""" Check for bishops trapped at A2/H2/A7/H7 """
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
opcolor = board.color
color = 1-opcolor
move = model.getMoveAtPly(ply-1).move
tcord = TCORD(move)
# Only a pawn is able to trap a bishop
if board.arBoard[tcord] != PAWN:
return
if tcord == B3:
cord = A2
elif tcord == G3:
cord = H2
elif tcord == B6:
cord = A7
elif tcord == G6:
cord = H7
else:
return
s = leval.evalTrappedBishops (board, opcolor)
olds = leval.evalTrappedBishops (oldboard, opcolor)
# We have got more points -> We have trapped a bishop
if s > olds:
yield 300/phase, _("%(opcolor)s has a new trapped bishop on %(cord)s") % {
'opcolor': reprColor[opcolor], 'cord': reprCord[cord]}
def simple_tropism (model, ply, phase):
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
color = oldboard.color
move = model.getMoveAtPly(ply-1).move
fcord = FCORD(move)
tcord = TCORD(move)
arBoard = board.arBoard
if arBoard[tcord] != PAWN:
score = leval.evalKingTropism(board, color, phase)
oldscore = leval.evalKingTropism(oldboard, color, phase)
else:
if color == WHITE:
rank23 = brank67[BLACK]
else: rank23 = brank67[WHITE]
if bitPosArray[fcord] & rank23:
yield 2, _("develops a pawn: %s") % reprCord[tcord]
else: yield 1, _("brings a pawn closer to the backrow: %s") % \
reprCord[tcord]
return
king = board.kings[color]
opking = board.kings[1-color]
if score > oldscore:
        # in the unusual FISCHERRANDOMCHESS castling case the tcord is
        # the involved rook's position, not the king's destination!
flag = move >> 12
if flag in (KING_CASTLE, QUEEN_CASTLE):
piece = KING
else:
piece = arBoard[tcord]
if phase >= 5 or distance[piece][fcord][opking] < \
distance[piece][fcord][king]:
yield score-oldscore, _("brings a %(piece)s closer to enemy king: %(cord)s") % {
'piece': reprPiece[piece], 'cord': reprCord[tcord]}
else:
yield (score-oldscore)*2, _("develops a %(piece)s: %(cord)s") % {
'piece': reprPiece[piece].lower(), 'cord': reprCord[tcord]}
def simple_activity (model, ply, phase):
board = model.getBoardAtPly(ply).board
oldboard = model.getBoardAtPly(ply-1).board
color = 1-board.color
move = model.getMoveAtPly(ply-1).move
fcord = FCORD(move)
tcord = TCORD(move)
board.setColor(1-board.color)
moves = len([m for m in genAllMoves(board) if FCORD(m) == tcord])
board.setColor(1-board.color)
oldmoves = len([m for m in genAllMoves(oldboard) if FCORD(m) == fcord])
if moves > oldmoves:
        yield (moves-oldmoves)/2, _("moves a %(piece)s to a more active square: %(cord)s") % {
'piece': reprPiece[board.arBoard[tcord]].lower(), 'cord': reprCord[tcord]}
def tip_pawnStorm (model, ply, phase):
""" If players are castled in different directions we should storm in
opponent side """
if phase >= 6:
# We don't use this in endgame
return
board = model.getBoardAtPly(ply).board
#if not board.hasCastled[WHITE] or not board.hasCastled[BLACK]:
# # Only applies after castling for both sides
# return
wking = board.boards[WHITE][KING]
bking = board.boards[BLACK][KING]
wleft = bin(board.boards[WHITE][PAWN] & left).count("1")
wright = bin(board.boards[WHITE][PAWN] & right).count("1")
bleft = bin(board.boards[BLACK][PAWN] & left).count("1")
bright = bin(board.boards[BLACK][PAWN] & right).count("1")
    if wking & left and bking & right:
        if wright > bright:
            yield (wright+3-bright)*10, _("White should launch a pawn storm on the right")
        elif bleft > wleft:
            yield (bleft+3-wleft)*10, _("Black should launch a pawn storm on the left")
    if wking & right and bking & left:
        if wleft > bleft:
            yield (wleft+3-bleft)*10, _("White should launch a pawn storm on the left")
        elif bright > wright:
            yield (bright+3-wright)*10, _("Black should launch a pawn storm on the right")
def tip_mobility (model, ply, phase):
board = model.getBoardAtPly(ply).board
colorBackup = board.color
# People need a chance to get developed
#if model.ply < 16:
# return
board.setColor(WHITE)
wmoves = len([move for move in genAllMoves(board) if \
KNIGHT <= board.arBoard[FCORD(move)] <= QUEEN and \
bitPosArray[TCORD(move)] & brank48[WHITE] and \
staticExchangeEvaluate(board, move) >= 0])
board.setColor(BLACK)
bmoves = len([move for move in genAllMoves(board) if \
KNIGHT <= board.arBoard[FCORD(move)] <= QUEEN and \
bitPosArray[TCORD(move)] & brank48[BLACK] and \
staticExchangeEvaluate(board, move) >= 0])
board.setColor(colorBackup)
if wmoves-phase >= (bmoves+1)*7:
yield wmoves-bmoves, _("Black has a rather cramped position")
elif wmoves-phase >= (bmoves+1)*3:
yield wmoves-bmoves, _("Black has a slightly cramped position")
elif bmoves-phase >= (wmoves+1)*7:
yield wmoves-bmoves, _("White has a rather cramped position")
elif bmoves-phase >= (wmoves+1)*3:
yield wmoves-bmoves, _("White has a slightly cramped position")
|
BartDeWaal/libsigrokdecode
|
refs/heads/unclean
|
decoders/i2c/pd.py
|
11
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2010-2014 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
# TODO: Look into arbitration, collision detection, clock synchronisation, etc.
# TODO: Implement support for 10-bit slave addresses.
# TODO: Implement support for inverting SDA/SCL levels (0->1 and 1->0).
# TODO: Implement support for detecting various bus errors.
import sigrokdecode as srd
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>:
- 'START' (START condition)
- 'START REPEAT' (Repeated START condition)
- 'ADDRESS READ' (Slave address, read)
- 'ADDRESS WRITE' (Slave address, write)
- 'DATA READ' (Data, read)
- 'DATA WRITE' (Data, write)
- 'STOP' (STOP condition)
- 'ACK' (ACK bit)
- 'NACK' (NACK bit)
- 'BITS' (<pdata>: list of data/address bits and their ss/es numbers)
<pdata> is the data or address byte associated with the 'ADDRESS*' and 'DATA*'
commands. Slave addresses do not include bit 0 (the READ/WRITE indication bit).
For example, a slave address field could be 0x51 (instead of 0xa2).
For 'START', 'START REPEAT', 'STOP', 'ACK', and 'NACK' <pdata> is None.
'''
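# A hypothetical OUTPUT_PYTHON packet stream for a one-byte write to slave
# 0x51, omitting the 'BITS' packets emitted before each byte (illustrative
# only; with the default 'shifted' address format, the byte on the wire
# would be 0xa2):
#
#   ['START', None]
#   ['ADDRESS WRITE', 0x51]
#   ['ACK', None]
#   ['DATA WRITE', 0x42]
#   ['ACK', None]
#   ['STOP', None]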
# CMD: [annotation-type-index, long annotation, short annotation]
proto = {
'START': [0, 'Start', 'S'],
'START REPEAT': [1, 'Start repeat', 'Sr'],
'STOP': [2, 'Stop', 'P'],
'ACK': [3, 'ACK', 'A'],
'NACK': [4, 'NACK', 'N'],
'BIT': [5, 'Bit', 'B'],
'ADDRESS READ': [6, 'Address read', 'AR'],
'ADDRESS WRITE': [7, 'Address write', 'AW'],
'DATA READ': [8, 'Data read', 'DR'],
'DATA WRITE': [9, 'Data write', 'DW'],
}
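# For instance, proto['ACK'] holds annotation class 3 and the texts
# ['ACK', 'A'], so putx([proto['ACK'][0], proto['ACK'][1:]]) below emits
# [3, ['ACK', 'A']] as an annotation (an illustrative note, not original).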
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 2
id = 'i2c'
name = 'I²C'
longname = 'Inter-Integrated Circuit'
desc = 'Two-wire, multi-master, serial bus.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['i2c']
channels = (
{'id': 'scl', 'name': 'SCL', 'desc': 'Serial clock line'},
{'id': 'sda', 'name': 'SDA', 'desc': 'Serial data line'},
)
options = (
{'id': 'address_format', 'desc': 'Displayed slave address format',
'default': 'shifted', 'values': ('shifted', 'unshifted')},
)
annotations = (
('start', 'Start condition'),
('repeat-start', 'Repeat start condition'),
('stop', 'Stop condition'),
('ack', 'ACK'),
('nack', 'NACK'),
('bit', 'Data/address bit'),
('address-read', 'Address read'),
('address-write', 'Address write'),
('data-read', 'Data read'),
('data-write', 'Data write'),
('warnings', 'Human-readable warnings'),
)
annotation_rows = (
('bits', 'Bits', (5,)),
('addr-data', 'Address/Data', (0, 1, 2, 3, 4, 6, 7, 8, 9)),
('warnings', 'Warnings', (10,)),
)
binary = (
('address-read', 'Address read'),
('address-write', 'Address write'),
('data-read', 'Data read'),
('data-write', 'Data write'),
)
def __init__(self, **kwargs):
self.samplerate = None
self.ss = self.es = self.ss_byte = -1
self.samplenum = None
self.bitcount = 0
self.databyte = 0
self.wr = -1
self.is_repeat_start = 0
self.state = 'FIND START'
self.oldscl = self.oldsda = 1
self.oldpins = [1, 1]
self.pdu_start = None
self.pdu_bits = 0
self.bits = []
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_bitrate = self.register(srd.OUTPUT_META,
meta=(int, 'Bitrate', 'Bitrate from Start bit to Stop bit'))
def putx(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def putp(self, data):
self.put(self.ss, self.es, self.out_python, data)
def putb(self, data):
self.put(self.ss, self.es, self.out_binary, data)
def is_start_condition(self, scl, sda):
# START condition (S): SDA = falling, SCL = high
if (self.oldsda == 1 and sda == 0) and scl == 1:
return True
return False
def is_data_bit(self, scl, sda):
# Data sampling of receiver: SCL = rising
if self.oldscl == 0 and scl == 1:
return True
return False
def is_stop_condition(self, scl, sda):
# STOP condition (P): SDA = rising, SCL = high
if (self.oldsda == 0 and sda == 1) and scl == 1:
return True
return False
def found_start(self, scl, sda):
self.ss, self.es = self.samplenum, self.samplenum
self.pdu_start = self.samplenum
self.pdu_bits = 0
cmd = 'START REPEAT' if (self.is_repeat_start == 1) else 'START'
self.putp([cmd, None])
self.putx([proto[cmd][0], proto[cmd][1:]])
self.state = 'FIND ADDRESS'
self.bitcount = self.databyte = 0
self.is_repeat_start = 1
self.wr = -1
self.bits = []
# Gather 8 bits of data plus the ACK/NACK bit.
def found_address_or_data(self, scl, sda):
# Address and data are transmitted MSB-first.
self.databyte <<= 1
self.databyte |= sda
# Remember the start of the first data/address bit.
if self.bitcount == 0:
self.ss_byte = self.samplenum
# Store individual bits and their start/end samplenumbers.
# In the list, index 0 represents the LSB (I²C transmits MSB-first).
self.bits.insert(0, [sda, self.samplenum, self.samplenum])
if self.bitcount > 0:
self.bits[1][2] = self.samplenum
if self.bitcount == 7:
self.bitwidth = self.bits[1][2] - self.bits[2][2]
self.bits[0][2] += self.bitwidth
# Return if we haven't collected all 8 + 1 bits, yet.
if self.bitcount < 7:
self.bitcount += 1
return
d = self.databyte
if self.state == 'FIND ADDRESS':
# The READ/WRITE bit is only in address bytes, not data bytes.
self.wr = 0 if (self.databyte & 1) else 1
if self.options['address_format'] == 'shifted':
d = d >> 1
bin_class = -1
if self.state == 'FIND ADDRESS' and self.wr == 1:
cmd = 'ADDRESS WRITE'
bin_class = 1
elif self.state == 'FIND ADDRESS' and self.wr == 0:
cmd = 'ADDRESS READ'
bin_class = 0
elif self.state == 'FIND DATA' and self.wr == 1:
cmd = 'DATA WRITE'
bin_class = 3
elif self.state == 'FIND DATA' and self.wr == 0:
cmd = 'DATA READ'
bin_class = 2
self.ss, self.es = self.ss_byte, self.samplenum + self.bitwidth
self.putp(['BITS', self.bits])
self.putp([cmd, d])
self.putb((bin_class, bytes([d])))
for bit in self.bits:
self.put(bit[1], bit[2], self.out_ann, [5, ['%d' % bit[0]]])
if cmd.startswith('ADDRESS'):
self.ss, self.es = self.samplenum, self.samplenum + self.bitwidth
w = ['Write', 'Wr', 'W'] if self.wr else ['Read', 'Rd', 'R']
self.putx([proto[cmd][0], w])
self.ss, self.es = self.ss_byte, self.samplenum
self.putx([proto[cmd][0], ['%s: %02X' % (proto[cmd][1], d),
'%s: %02X' % (proto[cmd][2], d), '%02X' % d]])
# Done with this packet.
self.bitcount = self.databyte = 0
self.bits = []
self.state = 'FIND ACK'
def get_ack(self, scl, sda):
self.ss, self.es = self.samplenum, self.samplenum + self.bitwidth
cmd = 'NACK' if (sda == 1) else 'ACK'
self.putp([cmd, None])
self.putx([proto[cmd][0], proto[cmd][1:]])
# There could be multiple data bytes in a row, so either find
# another data byte or a STOP condition next.
self.state = 'FIND DATA'
def found_stop(self, scl, sda):
# Meta bitrate
elapsed = 1 / float(self.samplerate) * (self.samplenum - self.pdu_start + 1)
bitrate = int(1 / elapsed * self.pdu_bits)
self.put(self.ss_byte, self.samplenum, self.out_bitrate, bitrate)
cmd = 'STOP'
self.ss, self.es = self.samplenum, self.samplenum
self.putp([cmd, None])
self.putx([proto[cmd][0], proto[cmd][1:]])
self.state = 'FIND START'
self.is_repeat_start = 0
self.wr = -1
self.bits = []
def decode(self, ss, es, data):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
for (self.samplenum, pins) in data:
# Ignore identical samples early on (for performance reasons).
if self.oldpins == pins:
continue
self.oldpins, (scl, sda) = pins, pins
self.pdu_bits += 1
# State machine.
if self.state == 'FIND START':
if self.is_start_condition(scl, sda):
self.found_start(scl, sda)
elif self.state == 'FIND ADDRESS':
if self.is_data_bit(scl, sda):
self.found_address_or_data(scl, sda)
elif self.state == 'FIND DATA':
if self.is_data_bit(scl, sda):
self.found_address_or_data(scl, sda)
elif self.is_start_condition(scl, sda):
self.found_start(scl, sda)
elif self.is_stop_condition(scl, sda):
self.found_stop(scl, sda)
elif self.state == 'FIND ACK':
if self.is_data_bit(scl, sda):
self.get_ack(scl, sda)
# Save current SDA/SCL values for the next round.
self.oldscl, self.oldsda = scl, sda
|
MQQiang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_longexp.py
|
182
|
import unittest
from test import support
class LongExpText(unittest.TestCase):
def test_longexp(self):
REPS = 65580
l = eval("[" + "2," * REPS + "]")
self.assertEqual(len(l), REPS)
def test_main():
support.run_unittest(LongExpText)
if __name__=="__main__":
test_main()
|
Empeeric/dirometer
|
refs/heads/master
|
django/core/exceptions.py
|
292
|
"""
Global Django exception and warning classes.
"""
class DjangoRuntimeWarning(RuntimeWarning):
pass
class ObjectDoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"The query returned multiple objects when only one was expected."
pass
class SuspiciousOperation(Exception):
"The user did something suspicious"
pass
class PermissionDenied(Exception):
"The user did not have permission to do that"
pass
class ViewDoesNotExist(Exception):
"The requested view does not exist"
pass
class MiddlewareNotUsed(Exception):
"This middleware is not used in this server configuration"
pass
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        ValidationError can be passed any object that can be printed (usually
        a string), a list of objects or a dictionary.
        """
        import operator
        from django.utils.encoding import force_unicode
if isinstance(message, dict):
self.message_dict = message
# Reduce each list of messages into a single list.
message = reduce(operator.add, message.values())
if isinstance(message, list):
self.messages = [force_unicode(msg) for msg in message]
else:
self.code = code
self.params = params
message = force_unicode(message)
self.messages = [message]
def __str__(self):
# This is needed because, without a __str__(), printing an exception
# instance would result in this:
# AttributeError: ValidationError instance has no attribute 'args'
# See http://www.python.org/doc/current/tut/node10.html#handling
if hasattr(self, 'message_dict'):
return repr(self.message_dict)
return repr(self.messages)
def __repr__(self):
if hasattr(self, 'message_dict'):
return 'ValidationError(%s)' % repr(self.message_dict)
return 'ValidationError(%s)' % repr(self.messages)
def update_error_dict(self, error_dict):
if hasattr(self, 'message_dict'):
if error_dict:
for k, v in self.message_dict.items():
error_dict.setdefault(k, []).extend(v)
else:
error_dict = self.message_dict
else:
error_dict[NON_FIELD_ERRORS] = self.messages
return error_dict
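# Illustrative usage (not part of the original module): a dict payload is
# kept per-field in message_dict, and update_error_dict() merges errors
# into an existing dict.
#
#   e = ValidationError({'name': [u'This field is required.']})
#   e.messages               # [u'This field is required.']
#   e.update_error_dict({})  # {'name': [u'This field is required.']}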
|
debugger06/MiroX
|
refs/heads/master
|
lib/downloader.py
|
2
|
# Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import datetime
import os
import random
import logging
import time
from miro.gtcache import gettext as _
from miro.database import DDBObject, ObjectNotFoundError
from miro.dl_daemon import daemon, command
from miro.download_utils import (next_free_filename, get_file_url_path,
next_free_directory, filter_directory_name)
from miro.util import (get_torrent_info_hash, returns_unicode, check_u,
returns_filename, unicodify, check_f, to_uni, is_magnet_uri)
from miro import app
from miro import dialogs
from miro import displaytext
from miro import eventloop
from miro import httpclient
from miro import models
from miro import prefs
from miro.plat.utils import samefile, unicode_to_filename
from miro import flashscraper
from miro import fileutil
from miro.fileobject import FilenameType
class DownloadStateManager(object):
"""DownloadStateManager: class to store state information about the
downloader.
    Commands to the downloader are batched and sent every second. This is
    based on the premise that commands for a particular download id can
    be completely superseded by a subsequent command, with the exception
    of a pause/resume pair. For example, a stop command completely
    supersedes a pause command, so if the two are sent in quick succession
    only the stop command will be sent to the downloader. The exception
    to this rule is the pause/resume pair, which act like matter and
    anti-matter and annihilate each other on contact (though with not
    even a whimper instead of a gorgeous display).
"""
STOP = command.DownloaderBatchCommand.STOP
RESUME = command.DownloaderBatchCommand.RESUME
PAUSE = command.DownloaderBatchCommand.PAUSE
RESTORE = command.DownloaderBatchCommand.RESTORE
UPDATE_INTERVAL = 1
def __init__(self):
self.total_up_rate = 0
self.total_down_rate = 0
# a hash of download ids that the server knows about.
self.downloads = {}
self.daemon_starter = None
self.startup_commands = dict()
self.commands = dict()
self.bulk_mode = False
def set_bulk_mode(self):
self.bulk_mode = True
def send_initial_updates(self):
commands = self.startup_commands
self.startup_commands = None
if commands:
c = command.DownloaderBatchCommand(RemoteDownloader.dldaemon,
commands)
c.send()
def send_updates(self):
commands = self.commands
self.commands = dict()
if commands:
c = command.DownloaderBatchCommand(RemoteDownloader.dldaemon,
commands)
c.send()
elif self.bulk_mode:
from miro.messages import DownloaderSyncCommandComplete
            # If we did a pause/resume/cancel all and there weren't any
            # items in the list to send, nobody would re-enable the
            # auto-sort. So we do it here.
DownloaderSyncCommandComplete().send_to_frontend()
# Reset the bulk mode notification.
self.bulk_mode = False
self.start_updates()
def start_updates(self):
eventloop.add_timeout(self.UPDATE_INTERVAL,
self.send_updates,
"Send Download Command Updates")
def get_download(self, dlid):
try:
return self.downloads[dlid]
except KeyError:
return None
def add_download(self, dlid, downloader):
self.downloads[dlid] = downloader
def delete_download(self, dlid):
try:
del self.downloads[dlid]
except KeyError:
return False
else:
return True
def daemon_started(self):
return self.daemon_starter and self.daemon_starter.started
def queue(self, identifier, cmd, args):
        if identifier not in self.downloads:
raise ValueError('add_download() not called before queue()')
# Catch restores first, we will flush them when the downloader's
# started.
if cmd == self.RESTORE and not self.daemon_started():
self.startup_commands[identifier] = (cmd, args)
return
        exists = identifier in self.commands
        # Make sure that a pause/resume pair cancel each other out. For
        # others, assume that a subsequent command can completely supersede
        # the previous command.
if exists:
old_cmd, unused = self.commands[identifier]
if (old_cmd == self.RESUME and cmd == self.PAUSE or
old_cmd == self.PAUSE and cmd == self.RESUME):
# Make sure that we unfreeze it
self.downloads[identifier].status_updates_frozen = False
del self.commands[identifier]
return
            # HACK: When we pause and resume we currently send a download
            # command, then a restore downloader command which doesn't
            # do anything. This also breaks our general assumption that a
            # current command can completely supersede any previously queued
            # command, so if we see one, we disable it. I'm not actually
            # sure why we'd want to send a restore command in this case.
if cmd == self.RESTORE:
logging.info('not restoring active download')
return
# Freeze the status updates, but don't freeze if it is a restore.
if not cmd == self.RESTORE:
self.downloads[identifier].status_updates_frozen = True
self.commands[identifier] = (cmd, args)
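    # A hedged sketch of the batching semantics described above (the manager
    # instance name is illustrative, not from the original source):
    #   mgr.queue(dlid, mgr.PAUSE, args); mgr.queue(dlid, mgr.RESUME, args)
    #     -> the pair cancels out; nothing is sent on the next update tick.
    #   mgr.queue(dlid, mgr.PAUSE, args); mgr.queue(dlid, mgr.STOP, args)
    #     -> the STOP supersedes the PAUSE; only STOP is sent.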
def init_controller(self):
"""Intializes the download daemon controller.
This doesn't actually start up the downloader daemon, that's done
in startup_downloader. Commands will be queued until then.
"""
self.daemon_starter = DownloadDaemonStarter()
def startup_downloader(self):
"""Initialize the downloaders.
        This method currently does two things. It deletes any stale files
        left in Incomplete Downloads, then it restarts downloads that have
been restored from the database. It must be called before any
RemoteDownloader objects get created.
"""
self.daemon_starter.startup()
# Now that the daemon has started, we can process updates.
self.send_initial_updates()
self.start_updates()
def shutdown_downloader(self, callback=None):
if self.daemon_starter:
self.daemon_starter.shutdown(callback)
elif callback:
callback()
def get_downloader_by_dlid(dlid):
try:
return RemoteDownloader.get_by_dlid(dlid)
except ObjectNotFoundError:
return None
@returns_unicode
def generate_dlid():
dlid = u"download%08d" % random.randint(0, 99999999)
while get_downloader_by_dlid(dlid=dlid):
dlid = u"download%08d" % random.randint(0, 99999999)
return dlid
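# e.g. generate_dlid() might return u"download04711042" (illustrative value);
# the while loop retries until the id is not already taken by an existing
# RemoteDownloader row.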
class RemoteDownloader(DDBObject):
"""Download a file using the downloader daemon."""
MIN_STATUS_UPDATE_SPACING = 0.7
def setup_new(self, url, item, contentType=None, channelName=None):
check_u(url)
if contentType:
check_u(contentType)
self.origURL = self.url = url
self.item_list = []
self.child_deleted = False
self.main_item_id = None
self.dlid = generate_dlid()
self.status = {}
self.metainfo = None
self.state = u'downloading'
if contentType is None:
# HACK: Some servers report the wrong content-type for
# torrent files. We try to work around that by assuming
# if the enclosure states that something is a torrent,
# it's a torrent. Thanks to j@v2v.cc.
if item.enclosure_type == u'application/x-bittorrent':
contentType = item.enclosure_type
self.contentType = u""
self.delete_files = True
self.channelName = channelName
self.manualUpload = False
self._save_later_dc = None
self._update_retry_time_dc = None
self.status_updates_frozen = False
self.last_update = time.time()
if contentType is None:
self.contentType = u""
else:
self.contentType = contentType
if self.contentType == u'':
self.get_content_type()
else:
self.run_downloader()
@classmethod
def finished_view(cls):
return cls.make_view("state in ('finished', 'uploading', "
"'uploading-paused')")
@classmethod
def auto_uploader_view(cls):
return cls.make_view("state == 'uploading' AND NOT manualUpload")
@classmethod
def get_by_dlid(cls, dlid):
return cls.make_view('dlid=?', (dlid,)).get_singleton()
@classmethod
def get_by_url(cls, url):
return cls.make_view('origURL=?', (url,)).get_singleton()
@classmethod
def orphaned_view(cls):
"""Downloaders with no items associated with them."""
return cls.make_view('id NOT IN (SELECT downloader_id from item)')
def signal_change(self, needs_save=True, needs_signal_item=True):
DDBObject.signal_change(self, needs_save=needs_save)
if needs_signal_item:
for item in self.item_list:
item.signal_change(needs_save=False)
if needs_save:
self._cancel_save_later()
def _save_later(self):
"""Save the remote downloader at some point in the future.
This is used to handle the fact that remote downloaders are
updated often, but those updates are usually just the status
dict, which is never used for SELECT statements. Continually
saving those changes to disk is just a waste of time and IO.
Instead, we schedule the save to happen sometime in the
future. When miro quits, we call the module-level function
run_delayed_saves(), which makes sure any pending objects are
saved to disk.
"""
if self._save_later_dc is None:
self._save_later_dc = eventloop.add_timeout(15,
self._save_now, "Delayed RemoteDownloader save")
def _save_now(self):
"""If _save_later() was called and we haven't saved the
downloader to disk, do it now.
"""
if self.id_exists() and self._save_later_dc is not None:
self.signal_change(needs_signal_item=False)
def _cancel_save_later(self):
if self._save_later_dc is not None:
self._save_later_dc.cancel()
self._save_later_dc = None
def on_content_type(self, info):
if not self.id_exists():
return
if info['status'] == 200:
self.url = info['updated-url'].decode('ascii','replace')
self.contentType = None
try:
self.contentType = info['content-type'].decode('ascii',
'replace')
except (KeyError, UnicodeDecodeError):
self.contentType = None
self.run_downloader()
else:
error = httpclient.UnexpectedStatusCode(info['status'])
self.on_content_type_error(error)
def on_content_type_error(self, error):
if not self.id_exists():
return
if isinstance(error, httpclient.AuthorizationCanceled):
# user canceled out of the authorization request, so stop the
# download.
self.status['state'] = u'stopped'
self.signal_change()
return
# we can't get a content type. it's possible that this is a
# retryable error so we're going to set the contentType to
# None and run the downloader. it'll handle HTTP errors
# better than we will.
self.contentType = None
self.run_downloader()
def get_content_type(self):
if is_magnet_uri(self.url):
self.contentType = u'application/x-magnet'
return
httpclient.grab_headers(self.url, self.on_content_type,
self.on_content_type_error)
@classmethod
def initialize_daemon(cls):
RemoteDownloader.dldaemon = daemon.ControllerDaemon()
def _get_rates(self):
state = self.get_state()
if state == u'downloading':
return (self.status.get('rate', 0), self.status.get('upRate', 0))
if state == u'uploading':
return (0, self.status.get('upRate', 0))
return (0, 0)
def before_changing_status(self):
rates = self._get_rates()
app.download_state_manager.total_down_rate -= rates[0]
app.download_state_manager.total_up_rate -= rates[1]
def after_changing_status(self):
self._recalc_state()
rates = self._get_rates()
app.download_state_manager.total_down_rate += rates[0]
app.download_state_manager.total_up_rate += rates[1]
@classmethod
def update_status(cls, data, cmd_done=False):
for field in data:
if field not in ['filename', 'shortFilename', 'channelName',
'metainfo']:
data[field] = unicodify(data[field])
self = get_downloader_by_dlid(dlid=data['dlid'])
if self is not None:
now = time.time()
last_update = self.last_update
rate_limit = False
state = self.get_state()
new_state = data.get('state', u'downloading')
# If this item was marked as pending update, then any update
# which comes in now which does not have cmd_done set is void.
if not cmd_done and self.status_updates_frozen:
logging.debug('self = %s, '
'saved state = %s '
'downloader state = %s. '
'Discard.',
self, state, new_state)
# treat as stale
return False
# If the timing between the status updates is too narrow,
# try to skip it because it makes the UI jerky otherwise.
if now < last_update:
logging.debug('time.time() gone backwards last = %s now = %s',
last_update, now)
else:
diff = now - last_update
if diff < self.MIN_STATUS_UPDATE_SPACING:
logging.debug('Rate limit: '
'self = %s, now - last_update = %s, '
'MIN_STATUS_UPDATE_SPACING = %s.',
self, diff, self.MIN_STATUS_UPDATE_SPACING)
rate_limit = True
# If the state is one which we set and was meant to be passed
# through to the downloader (valid_states), and the downloader
# replied with something that was a response to a previous
# download command, and state was also a part of valid_states,
# but the saved state and the new state do not match
# then it means the message is stale.
#
# Have a think about why this is true: when you set a state,
# which is authoritative, to the downloader you expect it
# to reply with that same state. If they do not match then it
# means the message is stale.
#
# The exception to this rule is if the downloader replies with
# an error state, or if downloading has transitioned to finished
# state.
#
# This also does not apply to any state which we set on the
# downloader via a restore command. A restore command before
# a pause/resume/cancel will work as intended, and no special
# trickery is required. A restore command which happens after
# a pause/resume/cancel is void, so no work is required.
#
# I hope this makes sense and is clear!
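            # Hedged illustration of the rule above: after stop() saves state
            # u'stopped', a late cmd_done reply still claiming u'downloading'
            # is treated as stale and discarded; the u'downloading' ->
            # u'finished' transition is the one exception that is accepted.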
valid_states = (u'downloading', u'paused', u'stopped',
u'uploading-paused', u'finished')
if (cmd_done and
state in valid_states and new_state in valid_states and
state != new_state):
if not (state == u'downloading' and new_state == u'finished'):
logging.debug('self = %s STALE. '
'Saved state %s, got state %s. Discarding.',
self, state, new_state)
return False
# We are updating! Reset the status_updates_frozen flag.
self.status_updates_frozen = False
# FIXME - this should get fixed.
metainfo = data.pop('metainfo', self.metainfo)
# For metainfo, the downloader process doesn't send the
# keys if they haven't changed. Therefore, use our
# current values if the key isn't present.
current = (self.status, self.metainfo)
new = (data, metainfo)
if current == new:
return True
# We have something to update: update the last updated timestamp.
self.last_update = now
was_finished = self.is_finished()
old_filename = self.get_filename()
self.before_changing_status()
# FIXME: how do we get all of the possible bit torrent
# activity strings into gettext? --NN
            if 'activity' in data and data['activity']:
data['activity'] = _(data['activity'])
            # only set attributes if something's changed. This makes our
            # UPDATE statements contain less data
if data != self.status:
self.status = data
if metainfo != self.metainfo:
self.metainfo = metainfo
self._recalc_state()
# Store the time the download finished
finished = self.is_finished() and not was_finished
name_changed = self.get_filename() != old_filename
file_migrated = (self.is_finished() and name_changed)
needs_signal_item = not (finished or file_migrated or rate_limit)
self.after_changing_status()
if ((self.get_state() == u'uploading'
and not self.manualUpload
and (app.config.get(prefs.LIMIT_UPLOAD_RATIO)
and self.get_upload_ratio() > app.config.get(prefs.UPLOAD_RATIO)))):
self.stop_upload()
if self.changed_attributes == set(('status',)):
# if we just changed status, then we can wait a while
# to store things to disk. Since we go through
# update_status() often, this results in a fairly
# large performance gain and alleviates #12101
self._save_later()
self.signal_change(needs_signal_item=needs_signal_item,
needs_save=False)
else:
self.signal_change()
if finished:
for item in self.item_list:
item.on_download_finished()
elif file_migrated:
self._file_migrated(old_filename)
elif name_changed and old_filename:
# update the title; happens with magnet URLs since we don't
# have a real one when the download starts. The old_filename
# check is to prevent things with existing titles from being
# renamed (#18656).
new_title = self.status['shortFilename']
if not isinstance(new_title, unicode):
try:
new_title = new_title.decode('utf-8')
except UnicodeDecodeError:
# if there's a problem with the filename, don't bother
# changing
return
for item in self.item_list:
if item.title is None:
item.title = new_title
item.signal_change()
return True
def run_downloader(self):
"""This is the actual download thread.
"""
flashscraper.try_scraping_url(self.url, self._run_downloader)
def _run_downloader(self, url, contentType=None, title=None):
if not self.id_exists():
# we got deleted while we were doing the flash scraping
return
if contentType is not None:
self.contentType = contentType
if url is not None:
if title is not None:
for mem in self.item_list:
if not mem.title:
mem.title = title
self.url = url
logging.debug("downloading url %s", self.url)
args = dict(url=self.url, content_type=self.contentType,
channel_name=self.channelName)
app.download_state_manager.add_download(self.dlid, self)
app.download_state_manager.queue(self.dlid,
app.download_state_manager.RESUME,
args)
self.status["state"] = u"downloading"
else:
self.status["state"] = u'failed'
self.status["shortReasonFailed"] = _('File not found')
self.status["reasonFailed"] = _('Flash URL Scraping Error')
self.signal_change()
def pause(self):
"""Pauses the download."""
if app.download_state_manager.get_download(self.dlid):
args = dict(upload=False)
app.download_state_manager.queue(self.dlid,
app.download_state_manager.PAUSE,
args)
self.before_changing_status()
self.status["state"] = u"paused"
self.after_changing_status()
self.signal_change()
def stop(self, delete):
"""Stops the download and removes the partially downloaded
file.
"""
if self.get_state() in [u'downloading', u'uploading', u'paused',
u'offline']:
if app.download_state_manager.get_download(self.dlid):
args = dict(upload=False, delete=delete)
app.download_state_manager.queue(
self.dlid,
app.download_state_manager.STOP,
args)
app.download_state_manager.delete_download(self.dlid)
if delete:
self.delete()
self.status["state"] = u"stopped"
self.signal_change()
def delete(self):
if "filename" in self.status:
filename = self.status['filename']
else:
return
try:
fileutil.delete(filename)
except OSError:
logging.exception("Error deleting downloaded file: %s",
to_uni(filename))
parent = os.path.join(fileutil.expand_filename(filename),
os.path.pardir)
parent = os.path.normpath(parent)
movies_dir = fileutil.expand_filename(app.config.get(prefs.MOVIES_DIRECTORY))
if ((os.path.exists(parent) and os.path.exists(movies_dir)
and not samefile(parent, movies_dir)
and len(os.listdir(parent)) == 0)):
try:
os.rmdir(parent)
except OSError:
logging.exception("Error deleting empty download directory: %s",
to_uni(parent))
def start(self):
"""Continues a paused, stopped, or failed download thread
"""
if self.get_state() == u'failed':
# For failed downloads, don't trust the redirected URL (#14232)
self.url = self.origURL
app.download_state_manager.delete_download(self.dlid)
self.dlid = generate_dlid()
self.before_changing_status()
self.status = {}
self.after_changing_status()
if self.contentType == u"":
self.get_content_type()
else:
self.run_downloader()
self.signal_change()
elif self.get_state() in (u'stopped', u'paused', u'offline'):
if app.download_state_manager.get_download(self.dlid):
args = dict(url=self.url, content_type=self.contentType,
channel_name=self.channelName)
app.download_state_manager.queue(
self.dlid,
app.download_state_manager.RESUME,
args)
self.status['state'] = u'downloading'
self.restart()
self.signal_change()
def migrate(self, directory):
if app.download_state_manager.get_download(self.dlid):
c = command.MigrateDownloadCommand(RemoteDownloader.dldaemon,
self.dlid, directory)
c.send()
else:
            # downloader doesn't have our dlid.  Move the file ourselves.
short_filename = self.status.get("shortFilename")
if not short_filename:
logging.warning(
"can't migrate download; no shortfilename! URL was %s",
self.url)
return
filename = self.status.get("filename")
if not filename:
logging.warning(
"can't migrate download; no filename! URL was %s",
self.url)
return
if fileutil.exists(filename):
if self.status.get('channelName', None) is not None:
channelName = filter_directory_name(self.status['channelName'])
directory = os.path.join(directory, channelName)
if not os.path.exists(directory):
try:
fileutil.makedirs(directory)
except OSError:
# FIXME - what about permission issues?
pass
newfilename = os.path.join(directory, short_filename)
if newfilename == filename:
return
                # create a file or directory to serve as a placeholder
                # before we start to migrate.  This helps ensure that the
                # destination we're migrating to is not already taken.
try:
is_dir = fileutil.isdir(filename)
if is_dir:
newfilename = next_free_directory(newfilename)
fp = None
else:
newfilename, fp = next_free_filename(newfilename)
fp.close()
except ValueError:
func = ('next_free_directory' if is_dir
else 'next_free_filename')
logging.warn('migrate: %s failed. candidate = %r',
func, newfilename)
else:
def callback():
self.status['filename'] = newfilename
self.signal_change(needs_signal_item=False)
self._file_migrated(filename)
fileutil.migrate_file(filename, newfilename, callback)
for i in self.item_list:
i.migrate_children(directory)
def _file_migrated(self, old_filename):
# Make sure that item_list is populated with items, see (#12202)
for item in models.Item.downloader_view(self.id):
self.add_item(item)
for item in self.item_list:
item.on_downloader_migrated(old_filename, self.get_filename())
def set_delete_files(self, delete_files):
self.delete_files = delete_files
def set_channel_name(self, channelName):
if self.channelName is None:
if channelName:
check_f(channelName)
self.channelName = channelName
def remove(self):
"""Removes downloader from the database and deletes the file.
"""
rates = self._get_rates()
app.download_state_manager.total_down_rate -= rates[0]
app.download_state_manager.total_up_rate -= rates[1]
if self.is_finished():
app.local_metadata_manager.remove_file(self.get_filename())
self.stop(self.delete_files)
DDBObject.remove(self)
def get_type(self):
"""Get the type of download. Will return either "http" or
"bittorrent".
"""
self.confirm_db_thread()
if ((self.contentType == u'application/x-bittorrent'
or self.contentType == u'application/x-magnet')):
return u"bittorrent"
return u"http"
def add_item(self, item):
"""In case multiple downloaders are getting the same file, we
can support multiple items
"""
if item not in self.item_list:
self.item_list.append(item)
if self.main_item_id is None:
self.main_item_id = item.id
self.signal_change()
def remove_item(self, item):
self.item_list.remove(item)
        if not self.item_list:
self.remove()
elif item.id == self.main_item_id:
self.main_item_id = self.item_list[0].id
self.signal_change()
def get_rate(self):
self.confirm_db_thread()
return self.status.get('rate', 0)
def get_eta(self):
self.confirm_db_thread()
return self.status.get('eta', 0)
@returns_unicode
def get_startup_activity(self):
self.confirm_db_thread()
activity = self.status.get('activity')
if ((activity is None and self.status.get('retryCount', -1) > -1
and 'retryTime' in self.status)):
activity = self._calc_retry_time()
if self._update_retry_time_dc is None:
self._update_retry_time_dc = eventloop.add_timeout(1,
self._update_retry_time, 'Updating retry time')
if activity is None:
return _("starting up")
return activity
    def _calc_retry_time(self):
        # grab datetime.now() once so the comparison and the subtraction
        # agree with each other
        now = datetime.datetime.now()
        if self.status['retryTime'] > now:
            retry_delta = self.status['retryTime'] - now
            time_str = displaytext.time_string(retry_delta.seconds)
            return _('no connection - retrying in %(time)s', {"time": time_str})
else:
return _('no connection - retrying soon')
def _update_retry_time(self):
if self.id_exists():
            # calling signal_change() causes us to call
            # get_startup_activity() again, which will have a new time now.
self.signal_change(needs_save=False)
self._update_retry_time_dc = None
def _cancel_retry_time_update(self):
if self._update_retry_time_dc:
self._update_retry_time_dc.cancel()
self._update_retry_time_dc = None
@returns_unicode
def get_reason_failed(self):
"""Returns the reason for the failure of this download. This
should only be called when the download is in the failed
state.
"""
if not self.get_state() == u'failed':
msg = u"get_reason_failed() called on a non-failed downloader"
raise ValueError(msg)
self.confirm_db_thread()
return self.status.get('reasonFailed', _("Unknown"))
@returns_unicode
def get_short_reason_failed(self):
if not self.get_state() == u'failed':
msg = u"get_short_reason_failed() called on a non-failed downloader"
raise ValueError(msg)
self.confirm_db_thread()
return self.status.get('shortReasonFailed', _("Unknown"))
@returns_unicode
def get_url(self):
"""Returns the URL we're downloading
"""
self.confirm_db_thread()
return self.url
@returns_unicode
def get_state(self):
"""Returns the state of the download: downloading, paused,
stopped, failed, or finished.
"""
self.confirm_db_thread()
return self.state
def is_finished(self):
return self.get_state() in (u'finished', u'uploading',
u'uploading-paused')
def get_total_size(self):
"""Returns the total size of the download in bytes.
"""
self.confirm_db_thread()
return self.status.get('totalSize', -1)
def get_current_size(self):
"""Returns the current amount downloaded in bytes.
"""
self.confirm_db_thread()
return self.status.get('currentSize', 0)
@returns_filename
def get_filename(self):
"""Returns the filename that we're downloading to. Should not be
called until state is "finished."
"""
self.confirm_db_thread()
# FIXME - FilenameType('') is a bogus value, but looks like a
# filename. should return None.
return self.status.get('filename', FilenameType(''))
def setup_restored(self):
self.status_updates_frozen = False
self.last_update = time.time()
self._save_later_dc = None
self._update_retry_time_dc = None
self.delete_files = True
self.item_list = []
if self.dlid == 'noid':
# this won't happen nowadays, but it can for old databases
self.dlid = generate_dlid()
self.status['rate'] = 0
self.status['upRate'] = 0
self.status['eta'] = 0
def on_signal_change(self):
self._recalc_state()
def _recalc_state(self):
new_state = self.status.get('state', u'downloading')
# avoid altering changed_attributes if we don't need to
if new_state != self.state:
self.state = new_state
def get_upload_ratio(self):
size = self.get_current_size()
if size == 0:
return 0
        # float division so we return a true ratio
        return float(self.status.get('uploaded', 0)) / size
def restart_on_startup_if_needed(self):
if not self.id_exists():
return
if app.download_state_manager.get_download(self.dlid):
# something has caused us to restart already, (for
# example, the user selects "resume seeding"). squelch
# any automatic behaviour (#12462)
return
if self.get_state() in (u'downloading', u'offline'):
self.restart()
if self.get_state() == u'uploading':
if ((self.manualUpload
or (app.config.get(prefs.LIMIT_UPLOAD_RATIO)
and self.get_upload_ratio() < app.config.get(prefs.UPLOAD_RATIO)))):
self.restart()
else:
self.stop_upload()
def restart(self):
if not self.status or self.status.get('dlerType') is None:
if self.contentType == u"":
self.get_content_type()
else:
self.run_downloader()
else:
app.download_state_manager.add_download(self.dlid, self)
dler_status = self.status
# FIXME: not sure why this is necessary
if self.contentType == u'application/x-magnet':
dler_status['url'] = self.url
dler_status['metainfo'] = self.metainfo
args = dict(downloader=dler_status)
app.download_state_manager.queue(
self.dlid,
app.download_state_manager.RESTORE,
args)
self.before_changing_status()
self.status['state'] = u'downloading'
self.after_changing_status()
def start_upload(self):
"""
Start an upload (seeding).
"""
if self.get_type() != u'bittorrent':
logging.warn("called start_upload for non-bittorrent downloader")
return
if self.child_deleted:
title = "Can't Resume Seeding"
msg = ("Seeding cannot resume because part of this torrent "
"has been deleted.")
dialogs.MessageBoxDialog(title, msg).run()
return
if self.get_state() not in (u'finished', u'uploading-paused'):
logging.warn("called start_upload when downloader state is: %s",
self.get_state())
return
self.manualUpload = True
if app.download_state_manager.get_download(self.dlid):
args = dict(url=self.url, content_type=self.contentType,
channel_name=self.channelName)
app.download_state_manager.queue(self.dlid,
app.download_state_manager.RESUME,
args)
else:
self.before_changing_status()
self.status['state'] = u'uploading'
self.after_changing_status()
self.restart()
self.signal_change()
def stop_upload(self):
"""
Stop uploading/seeding and set status as "finished".
"""
if app.download_state_manager.get_download(self.dlid):
args = dict(upload=True)
app.download_state_manager.queue(self.dlid,
app.download_state_manager.STOP, args)
app.download_state_manager.delete_download(self.dlid)
self.before_changing_status()
self.status["state"] = u"finished"
self.after_changing_status()
self.signal_change()
def pause_upload(self):
"""
Stop uploading/seeding and set status as "uploading-paused".
"""
if app.download_state_manager.get_download(self.dlid):
args = dict(upload=True)
app.download_state_manager.queue(self.dlid,
app.download_state_manager.PAUSE,
args)
app.download_state_manager.delete_download(self.dlid)
self.before_changing_status()
self.status["state"] = u"uploading-paused"
self.after_changing_status()
self.signal_change()
def cleanup_incomplete_downloads():
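    """Delete leftover files in the 'Incomplete Downloads' directory that
    no live downloader still references.
    """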
download_dir = os.path.join(app.config.get(prefs.MOVIES_DIRECTORY),
'Incomplete Downloads')
if not fileutil.exists(download_dir):
return
files_in_use = set()
for downloader in RemoteDownloader.make_view():
if downloader.get_state() in ('downloading', 'paused',
'offline', 'uploading', 'finished',
'uploading-paused'):
filename = downloader.get_filename()
if len(filename) > 0:
if not fileutil.isabs(filename):
filename = os.path.join(download_dir, filename)
files_in_use.add(filename)
try:
entries = fileutil.listdir(download_dir)
except OSError:
entries = []
for f in entries:
f = os.path.join(download_dir, f)
if f not in files_in_use:
try:
if fileutil.isfile(f):
fileutil.remove(f)
elif fileutil.isdir(f):
fileutil.rmtree(f)
except OSError:
# FIXME - maybe a permissions error?
pass
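# Enforce prefs.UPSTREAM_TORRENT_LIMIT: stop seeding on any auto-uploaders
# beyond the configured limit.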
def kill_uploaders(*args):
torrent_limit = app.config.get(prefs.UPSTREAM_TORRENT_LIMIT)
auto_uploads = list(RemoteDownloader.auto_uploader_view())
for dler in auto_uploads[torrent_limit:]:
dler.stop_upload()
def _on_config_change(obj, key, value):
if key == prefs.UPSTREAM_TORRENT_LIMIT.key:
kill_uploaders()
class DownloadDaemonStarter(object):
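    """Starts the downloader daemon, restarts the downloads that were
    saved at startup, and coordinates daemon shutdown.
    """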
def __init__(self):
RemoteDownloader.initialize_daemon()
self.downloads_at_startup = list(RemoteDownloader.make_view())
self.started = False
self._config_callback_handle = None
self._download_tracker = None
def limit_uploaders(self):
view = RemoteDownloader.auto_uploader_view()
self._download_tracker = view.make_tracker()
self._download_tracker.connect('added', kill_uploaders)
self._config_callback_handle = app.backend_config_watcher.connect(
"changed", _on_config_change)
kill_uploaders()
def disconnect_signals(self):
if self._download_tracker is not None:
self._download_tracker.unlink()
self._download_tracker = None
if self._config_callback_handle is not None:
app.backend_config_watcher.disconnect(self._config_callback_handle)
self._config_callback_handle = None
def startup(self):
cleanup_incomplete_downloads()
RemoteDownloader.dldaemon.start_downloader_daemon()
self.limit_uploaders()
self.restart_downloads()
self.started = True
def restart_downloads(self):
for downloader in self.downloads_at_startup:
downloader.restart_on_startup_if_needed()
def shutdown(self, callback):
self.disconnect_signals()
self.shutdown_callback = callback
if not self.started:
self._on_shutdown()
else:
RemoteDownloader.dldaemon.shutdown_downloader_daemon(
callback=self._on_shutdown)
def _on_shutdown(self):
shutdown_downloader_objects()
self.shutdown_callback()
del self.shutdown_callback
def lookup_downloader(url):
try:
return RemoteDownloader.get_by_url(url)
except ObjectNotFoundError:
return None
def get_existing_downloader_by_url(url):
    return lookup_downloader(url)
def get_existing_downloader(item):
try:
return RemoteDownloader.get_by_id(item.downloader_id)
except ObjectNotFoundError:
return None
def get_downloader_for_item(item):
existing = get_existing_downloader(item)
if existing:
return existing
url = item.get_url()
existing = get_existing_downloader_by_url(url)
if existing:
return existing
channelName = unicode_to_filename(item.get_channel_title(True))
if not channelName:
channelName = None
if url.startswith(u'file://'):
path = get_file_url_path(url)
try:
get_torrent_info_hash(path)
except ValueError:
raise ValueError("Don't know how to handle %s" % url)
except (OSError, IOError):
return None
else:
return RemoteDownloader(url, item, u'application/x-bittorrent',
channelName=channelName)
elif is_magnet_uri(url):
return RemoteDownloader(url, item, u'application/x-magnet')
else:
return RemoteDownloader(url, item, channelName=channelName)
def shutdown_downloader_objects():
"""Perform shutdown code for RemoteDownloaders.
    This means a couple of things:
- Make sure any RemoteDownloaders with pending changes get saved.
- Cancel the update retry time callbacks
"""
for downloader in RemoteDownloader.make_view():
downloader._save_now()
downloader._cancel_retry_time_update()
|
SummerLW/Perf-Insight-Report
|
refs/heads/test
|
dashboard/dashboard/add_point_queue_test.py
|
5
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from dashboard import add_point_queue
from dashboard import testing_common
from dashboard import utils
from dashboard.models import graph_data
from dashboard.models import stoppage_alert
class GetOrCreateAncestorsTest(testing_common.TestCase):
def setUp(self):
super(GetOrCreateAncestorsTest, self).setUp()
self.SetCurrentUser('foo@bar.com', is_admin=True)
def testGetOrCreateAncestors_GetsExistingEntities(self):
master_key = graph_data.Master(id='ChromiumPerf', parent=None).put()
bot_key = graph_data.Bot(id='win7', parent=master_key).put()
suite_key = graph_data.Test(id='dromaeo', parent=bot_key).put()
subtest_key = graph_data.Test(id='dom', parent=suite_key).put()
graph_data.Test(id='modify', parent=subtest_key).put()
actual_parent = add_point_queue._GetOrCreateAncestors(
'ChromiumPerf', 'win7', 'dromaeo/dom/modify')
self.assertEqual('modify', actual_parent.key.id())
# No extra Test or Bot objects should have been added to the database
# beyond the four that were put in before the _GetOrCreateAncestors call.
self.assertEqual(1, len(graph_data.Master.query().fetch()))
self.assertEqual(1, len(graph_data.Bot.query().fetch()))
self.assertEqual(3, len(graph_data.Test.query().fetch()))
def testGetOrCreateAncestors_CreatesAllExpectedEntities(self):
parent = add_point_queue._GetOrCreateAncestors(
'ChromiumPerf', 'win7', 'dromaeo/dom/modify')
self.assertEqual('modify', parent.key.id())
# Check that all the Bot and Test entities were correctly added.
created_masters = graph_data.Master.query().fetch()
created_bots = graph_data.Bot.query().fetch()
created_tests = graph_data.Test.query().fetch()
self.assertEqual(1, len(created_masters))
self.assertEqual(1, len(created_bots))
self.assertEqual(3, len(created_tests))
self.assertEqual('ChromiumPerf', created_masters[0].key.id())
self.assertIsNone(created_masters[0].key.parent())
self.assertEqual('win7', created_bots[0].key.id())
self.assertEqual('ChromiumPerf', created_bots[0].key.parent().id())
self.assertEqual('dromaeo', created_tests[0].key.id())
self.assertIsNone(created_tests[0].parent_test)
self.assertEqual('win7', created_tests[0].bot.id())
self.assertEqual('dom', created_tests[1].key.id())
self.assertEqual('dromaeo', created_tests[1].parent_test.id())
self.assertIsNone(created_tests[1].bot)
self.assertEqual('modify', created_tests[2].key.id())
self.assertEqual('dom', created_tests[2].parent_test.id())
self.assertIsNone(created_tests[2].bot)
def testGetOrCreateAncestors_UpdatesStoppageAlert(self):
testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
row = testing_common.AddRows('M/b/suite/foo', {123})[0]
test = utils.TestKey('M/b/suite/foo').get()
alert_key = stoppage_alert.CreateStoppageAlert(test, row).put()
test.stoppage_alert = alert_key
test.put()
add_point_queue._GetOrCreateAncestors('M', 'b', 'suite/foo')
self.assertIsNone(test.key.get().stoppage_alert)
self.assertTrue(alert_key.get().recovered)
if __name__ == '__main__':
unittest.main()
|
gurkerl83/millipede-xtreemfs
|
refs/heads/pseiferth-libxtreemfsjava
|
contrib/ganglia-plugin/src/xtfs-osd-plugin.py
|
7
|
'''
Created on May 25, 2011
@author: bzcseife
This is a python ganglia plugin which monitors the status of an OSD service of the XtreemFS
filesystem. It is intended to run on the same host as the OSD and gathers information about
the OSD via SNMP. You therefore have to configure your OSD to provide an SNMP agent on this
host.
'''
#TODO: If ganglia supports 64bit values, use 64bit integers instead of converting all 64bit
#integers reported by the SNMP agent to 32bit integers.
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.entity.rfc3413.oneliner.cmdgen import UdpTransportTarget
descriptors = list()
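# Each getter below issues a single SNMP GET against the XtreemFS
# enterprise OID subtree (1.3.6.1.4.1.38350) and falls back to a default
# value on error.  As an illustration only, they could be collapsed into a
# generic helper such as this sketch (snmp_get is our name, not part of
# the original plugin; it relies on the authData/transportTarget globals
# that metric_init sets up):
def snmp_get(oid, default=0, scale=1):
    errorIndication, errorStatus, errorIndex, varBinds = \
        cmdgen.CommandGenerator().getCmd(authData, transportTarget, oid)
    if not errorStatus and errorIndication is None:
        return int(varBinds[0][1]) / scale
    return default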
#Get the used memory of the JVM
def JvmUsedMem(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 1, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#Get the free memory of the JVM
def JvmFreeMem(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 2, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#Get the number of client connections
def ClientConnections(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 7, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of pending requests
def PendingRequests(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 8, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of objects received
def ObjectsReceived(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 1, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of replicated objects received
def ReplObjectsReceived(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 2, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of objects transmitted
def ObjectsTransmitted(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 3, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of replicated bytes received
def ReplBytesReceived(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 4, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#Get the number of bytes received
def BytesReceived(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 5, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#Get the number of bytes transmitted
def BytesTransmitted(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 6, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#Get the length of the preprocessing stage queue
def PreprocQueueLength(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 7, 0))
if (errorStatus == False and errorIndication == None):
        # a queue length is a plain request count; no byte scaling here
        return int(varBinds[0][1])
else:
return 0
#Get the length of the storage stage queue
def StorageQueueLength(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 8, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the length of the deletion stage queue
def DeletionQueueLength(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 9, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of open files from the OSD per snmp
def OsdOpenFiles(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 10, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the number of deleted files from the OSD per snmp
def OsdDeletedFiles(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 11, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1])
else:
return 0
#Get the free space from the OSD per snmp
def OsdFreeSpace(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 4, 12, 0))
if (errorStatus == False and errorIndication == None):
return int(varBinds[0][1] / 1024 / 1024)
else:
return 0
#get the status of the OSD
def Status(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 11, 0))
if (errorStatus == False and errorIndication == None):
return str(varBinds[0][1])
else:
return "OFFLINE"
#get the UUID of the OSD
#OID: 1.3.6.1.4.1.38350.1.13.0
def Uuid(name):
errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(authData,
transportTarget,
(1, 3, 6, 1, 4, 1, 38350, 1, 13, 0))
if (errorStatus == False and errorIndication == None):
return str(varBinds[0][1])
else:
return "Service not available"
def metric_init(params):
    global descriptors
    global Community_String
    global Snmp_Port
    global authData
    global transportTarget
    if 'CommunityString' in params:
        Community_String = params['CommunityString']
    else:
        Community_String = 'public'
    if 'Port' in params:
        Snmp_Port = int(params['Port'])
    if 'Host' in params:
        Snmp_Host = params['Host']
    # use the configured community string; it was previously parsed but
    # never passed to CommunityData
    authData = cmdgen.CommunityData('xtreemfs-agent', Community_String)
    transportTarget = cmdgen.UdpTransportTarget((Snmp_Host, Snmp_Port), 1, 0)
d0 = {'name': 'osd_jvm_used_mem',
'call_back': JvmUsedMem,
'time_max': 90,
'value_type': 'uint',
'units': 'Megabytes',
'slope': 'both',
'format': '%u',
'description': 'The amount of memory the JVM uses currently.',
'groups': 'osd'}
d1 = {'name': 'osd_jvm_free_mem',
'call_back': JvmFreeMem,
'time_max': 90,
'value_type': 'uint',
'units': 'Megabytes',
'slope': 'both',
'format': '%u',
'description': 'The amount of free memory the JVM can still use.',
'groups': 'osd'}
d2 = {'name': 'osd_client_connections',
'call_back': ClientConnections,
'time_max': 90,
'value_type': 'uint',
'units': 'clients',
'slope': 'both',
'format': '%u',
          'description': 'The number of active client connections this OSD currently has to handle.',
'groups': 'osd'}
d3 = {'name': 'osd_pending_requests',
'call_back': PendingRequests,
'time_max': 90,
'value_type': 'uint',
'units': 'pending requests',
'slope': 'both',
'format': '%u',
'description': 'The number of pending requests this OSD has enqueued.',
'groups': 'osd'}
d4 = {'name': 'objects_received',
'call_back': ObjectsReceived,
'time_max': 90,
'value_type': 'uint',
'units': 'objects',
'slope': 'positive',
'format': '%u',
'description': 'The number of objects this OSD has received.',
'groups': 'osd'}
d5 = {'name': 'repl_objects_received',
'call_back': ReplObjectsReceived,
'time_max': 90,
'value_type': 'uint',
'units': 'objects',
'slope': 'positive',
'format': '%u',
'description': 'The number of replicated objects this OSD has received.',
'groups': 'osd'}
d6 = {'name': 'objects_transmitted',
'call_back': ObjectsTransmitted,
'time_max': 90,
'value_type': 'uint',
'units': 'objects',
'slope': 'positive',
'format': '%u',
'description': 'The number of objects this OSD has transmitted.',
'groups': 'osd'}
d7 = {'name': 'repl_bytes_received',
'call_back': ReplBytesReceived,
'time_max': 90,
'value_type': 'uint',
'units': 'Megabytes',
'slope': 'positive',
'format': '%u',
'description': 'The number of replicated bytes this OSD has received.',
'groups': 'osd'}
d8 = {'name': 'bytes_received',
'call_back': BytesReceived,
'time_max': 90,
'value_type': 'uint',
'units': 'Megabytes',
'slope': 'positive',
'format': '%u',
'description': 'The number of bytes this OSD has received.',
'groups': 'osd'}
d9 = {'name': 'bytes_transmitted',
'call_back': BytesTransmitted,
'time_max': 90,
'value_type': 'uint',
'units': 'Megabytes',
'slope': 'positive',
'format': '%u',
'description': 'The number of bytes this OSD has transmitted.',
'groups': 'osd'}
d10 = {'name': 'preproc_queue_length',
'call_back': PreprocQueueLength,
'time_max': 90,
'value_type': 'uint',
'units': 'requests',
'slope': 'both',
'format': '%u',
'description': 'The length of the preprocessing stage queue of this OSD.',
'groups': 'osd'}
d11 = {'name': 'storage_queue_length',
'call_back': StorageQueueLength,
'time_max': 90,
'value_type': 'uint',
'units': 'requests',
'slope': 'positive',
'format': '%u',
'description': 'The length of the storage stage queue of this OSD.',
'groups': 'osd'}
d12 = {'name': 'deletion_queue_length',
'call_back': DeletionQueueLength,
'time_max': 90,
'value_type': 'uint',
'units': 'requests',
'slope': 'both',
'format': '%u',
'description': 'The length of the deletion stage queue of this OSD.',
'groups': 'osd'}
d14 = {'name': 'open_files',
'call_back': OsdOpenFiles,
'time_max': 90,
'value_type': 'uint',
'units': 'files',
'slope': 'both',
'format': '%u',
           'description': 'The number of files this OSD currently has open.',
'groups': 'osd'}
d15 = {'name': 'deleted_files',
'call_back': OsdDeletedFiles,
'time_max': 90,
'value_type': 'uint',
'units': 'files',
'slope': 'positive',
'format': '%u',
'description': 'The number of deleted files on this OSD',
'groups': 'osd'}
    d16 = {'name': 'free_space',
           'call_back': OsdFreeSpace,
           'time_max': 90,
           # value_type: string | uint | float | double
           'value_type': 'uint',
           # units: unit of the metric
           'units': 'Megabytes',
           # slope: zero | positive | negative | both.  This maps to the
           # RRDTool data source type: 'positive' creates a COUNTER (the
           # rate of change is calculated), 'both' creates a GAUGE (the
           # reported value is graphed as-is), and 'zero' files the metric
           # under "Time and String Metrics" or "Constant Metrics",
           # depending on its value_type.
           'slope': 'both',
           # format: format string for the metric; must correspond to
           # value_type (see
           # http://docs.python.org/library/stdtypes.html#string-formatting)
           'format': '%u',
           'description': 'The free disc space on the partition where this OSD stores its object files.',
           # groups (optional): the groups the metric belongs to
           'groups': 'osd'}
d17 = {'name': 'osd_status',
'call_back': Status,
'time_max': 90,
'value_type': 'string',
'units': '',
'slope': 'zero',
'format': '%s',
'description': 'ONLINE if this OSD is running correctly, OFFLINE otherwise',
'groups': 'osd'}
d18 = {'name': 'osd_uuid',
'call_back': Uuid,
'time_max': 90,
'value_type': 'string',
'units': '',
'slope': 'zero',
'format': '%s',
'description': 'UUID of the OSD running on this host',
'groups': 'osd'}
    descriptors = [d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12,
                   d14, d15, d16, d17, d18]
return descriptors
def metric_cleanup():
'''Clean up the metric module.'''
pass
#for debugging purpose
if __name__ == '__main__':
params = {'CommunityString': 'public', 'Host': 'localhost', 'Port': 9003}
metric_init(params)
for d in descriptors:
v = d['call_back'](d['name'])
print 'value for %s is' % (d['name'])
print v
|
geminy/aidear
|
refs/heads/master
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/gyp/aidl.py
|
8
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Invokes Android's aidl
"""
import optparse
import os
import re
import sys
import zipfile
from util import build_utils
def main(argv):
option_parser = optparse.OptionParser()
build_utils.AddDepfileOption(option_parser)
option_parser.add_option('--aidl-path', help='Path to the aidl binary.')
option_parser.add_option('--imports', help='Files to import.')
option_parser.add_option('--includes',
help='Directories to add as import search paths.')
option_parser.add_option('--srcjar', help='Path for srcjar output.')
options, args = option_parser.parse_args(argv[1:])
with build_utils.TempDir() as temp_dir:
for f in args:
classname = os.path.splitext(os.path.basename(f))[0]
output = os.path.join(temp_dir, classname + '.java')
aidl_cmd = [options.aidl_path]
aidl_cmd += [
'-p' + s for s in build_utils.ParseGnList(options.imports)
]
if options.includes is not None:
aidl_cmd += [
'-I' + s for s in build_utils.ParseGnList(options.includes)
]
aidl_cmd += [
f,
output
]
build_utils.CheckOutput(aidl_cmd)
with zipfile.ZipFile(options.srcjar, 'w') as srcjar:
for path in build_utils.FindInDirectory(temp_dir, '*.java'):
with open(path) as fileobj:
data = fileobj.read()
pkg_name = re.search(r'^\s*package\s+(.*?)\s*;', data, re.M).group(1)
arcname = '%s/%s' % (pkg_name.replace('.', '/'), os.path.basename(path))
build_utils.AddToZipHermetic(srcjar, arcname, data=data)
if options.depfile:
build_utils.WriteDepfile(options.depfile, options.srcjar)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
babyliynfg/cross
|
refs/heads/master
|
tools/project-creator/Python2.6.6/Lib/test/test_operator.py
|
1
|
import operator
import unittest
from test import test_support
class Seq1:
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
class Seq2(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
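# Seq1 is an old-style class and Seq2 a new-style (object-derived) class;
# the tests below exercise the sequence protocol against both.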
class OperatorTestCase(unittest.TestCase):
def test_lt(self):
self.failUnlessRaises(TypeError, operator.lt)
self.failUnlessRaises(TypeError, operator.lt, 1j, 2j)
self.failIf(operator.lt(1, 0))
self.failIf(operator.lt(1, 0.0))
self.failIf(operator.lt(1, 1))
self.failIf(operator.lt(1, 1.0))
self.failUnless(operator.lt(1, 2))
self.failUnless(operator.lt(1, 2.0))
def test_le(self):
self.failUnlessRaises(TypeError, operator.le)
self.failUnlessRaises(TypeError, operator.le, 1j, 2j)
self.failIf(operator.le(1, 0))
self.failIf(operator.le(1, 0.0))
self.failUnless(operator.le(1, 1))
self.failUnless(operator.le(1, 1.0))
self.failUnless(operator.le(1, 2))
self.failUnless(operator.le(1, 2.0))
def test_eq(self):
class C(object):
def __eq__(self, other):
raise SyntaxError
__hash__ = None # Silence Py3k warning
self.failUnlessRaises(TypeError, operator.eq)
self.failUnlessRaises(SyntaxError, operator.eq, C(), C())
self.failIf(operator.eq(1, 0))
self.failIf(operator.eq(1, 0.0))
self.failUnless(operator.eq(1, 1))
self.failUnless(operator.eq(1, 1.0))
self.failIf(operator.eq(1, 2))
self.failIf(operator.eq(1, 2.0))
def test_ne(self):
class C(object):
def __ne__(self, other):
raise SyntaxError
self.failUnlessRaises(TypeError, operator.ne)
self.failUnlessRaises(SyntaxError, operator.ne, C(), C())
self.failUnless(operator.ne(1, 0))
self.failUnless(operator.ne(1, 0.0))
self.failIf(operator.ne(1, 1))
self.failIf(operator.ne(1, 1.0))
self.failUnless(operator.ne(1, 2))
self.failUnless(operator.ne(1, 2.0))
def test_ge(self):
self.failUnlessRaises(TypeError, operator.ge)
self.failUnlessRaises(TypeError, operator.ge, 1j, 2j)
self.failUnless(operator.ge(1, 0))
self.failUnless(operator.ge(1, 0.0))
self.failUnless(operator.ge(1, 1))
self.failUnless(operator.ge(1, 1.0))
self.failIf(operator.ge(1, 2))
self.failIf(operator.ge(1, 2.0))
def test_gt(self):
self.failUnlessRaises(TypeError, operator.gt)
self.failUnlessRaises(TypeError, operator.gt, 1j, 2j)
self.failUnless(operator.gt(1, 0))
self.failUnless(operator.gt(1, 0.0))
self.failIf(operator.gt(1, 1))
self.failIf(operator.gt(1, 1.0))
self.failIf(operator.gt(1, 2))
self.failIf(operator.gt(1, 2.0))
def test_abs(self):
self.failUnlessRaises(TypeError, operator.abs)
self.failUnlessRaises(TypeError, operator.abs, None)
self.failUnless(operator.abs(-1) == 1)
self.failUnless(operator.abs(1) == 1)
def test_add(self):
self.failUnlessRaises(TypeError, operator.add)
self.failUnlessRaises(TypeError, operator.add, None, None)
self.failUnless(operator.add(3, 4) == 7)
def test_bitwise_and(self):
self.failUnlessRaises(TypeError, operator.and_)
self.failUnlessRaises(TypeError, operator.and_, None, None)
self.failUnless(operator.and_(0xf, 0xa) == 0xa)
def test_concat(self):
self.failUnlessRaises(TypeError, operator.concat)
self.failUnlessRaises(TypeError, operator.concat, None, None)
self.failUnless(operator.concat('py', 'thon') == 'python')
self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
self.failUnless(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
self.failUnless(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
self.failUnlessRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
self.failUnlessRaises(TypeError, operator.countOf)
self.failUnlessRaises(TypeError, operator.countOf, None, None)
self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
def test_delitem(self):
a = [4, 3, 2, 1]
self.failUnlessRaises(TypeError, operator.delitem, a)
self.failUnlessRaises(TypeError, operator.delitem, a, None)
self.failUnless(operator.delitem(a, 1) is None)
self.assert_(a == [4, 2, 1])
def test_delslice(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.delslice, a)
self.failUnlessRaises(TypeError, operator.delslice, a, None, None)
self.failUnless(operator.delslice(a, 2, 8) is None)
self.assert_(a == [0, 1, 8, 9])
operator.delslice(a, 0, test_support.MAX_Py_ssize_t)
self.assert_(a == [])
def test_div(self):
self.failUnlessRaises(TypeError, operator.div, 5)
self.failUnlessRaises(TypeError, operator.div, None, None)
        self.failUnless(operator.div(5, 2) == 2)
def test_floordiv(self):
self.failUnlessRaises(TypeError, operator.floordiv, 5)
self.failUnlessRaises(TypeError, operator.floordiv, None, None)
self.failUnless(operator.floordiv(5, 2) == 2)
def test_truediv(self):
self.failUnlessRaises(TypeError, operator.truediv, 5)
self.failUnlessRaises(TypeError, operator.truediv, None, None)
self.failUnless(operator.truediv(5, 2) == 2.5)
def test_getitem(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.getitem)
self.failUnlessRaises(TypeError, operator.getitem, a, None)
self.failUnless(operator.getitem(a, 2) == 2)
def test_getslice(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.getslice)
self.failUnlessRaises(TypeError, operator.getslice, a, None, None)
self.failUnless(operator.getslice(a, 4, 6) == [4, 5])
b = operator.getslice(a, 0, test_support.MAX_Py_ssize_t)
self.assert_(b == a)
def test_indexOf(self):
self.failUnlessRaises(TypeError, operator.indexOf)
self.failUnlessRaises(TypeError, operator.indexOf, None, None)
self.failUnless(operator.indexOf([4, 3, 2, 1], 3) == 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
def test_invert(self):
self.failUnlessRaises(TypeError, operator.invert)
self.failUnlessRaises(TypeError, operator.invert, None)
self.failUnless(operator.inv(4) == -5)
def test_isCallable(self):
self.failUnlessRaises(TypeError, operator.isCallable)
class C:
pass
def check(self, o, v):
self.assertEqual(operator.isCallable(o), v)
with test_support._check_py3k_warnings():
self.assertEqual(callable(o), v)
check(self, 4, 0)
check(self, operator.isCallable, 1)
check(self, C, 1)
check(self, C(), 0)
def test_isMappingType(self):
self.failUnlessRaises(TypeError, operator.isMappingType)
self.failIf(operator.isMappingType(1))
self.failIf(operator.isMappingType(operator.isMappingType))
self.failUnless(operator.isMappingType(operator.__dict__))
self.failUnless(operator.isMappingType({}))
def test_isNumberType(self):
self.failUnlessRaises(TypeError, operator.isNumberType)
self.failUnless(operator.isNumberType(8))
self.failUnless(operator.isNumberType(8j))
self.failUnless(operator.isNumberType(8L))
self.failUnless(operator.isNumberType(8.3))
self.failIf(operator.isNumberType(dir()))
def test_isSequenceType(self):
self.failUnlessRaises(TypeError, operator.isSequenceType)
self.failUnless(operator.isSequenceType(dir()))
self.failUnless(operator.isSequenceType(()))
self.failUnless(operator.isSequenceType(xrange(10)))
self.failUnless(operator.isSequenceType('yeahbuddy'))
self.failIf(operator.isSequenceType(3))
class Dict(dict): pass
self.failIf(operator.isSequenceType(Dict()))
def test_lshift(self):
self.failUnlessRaises(TypeError, operator.lshift)
self.failUnlessRaises(TypeError, operator.lshift, None, 42)
self.failUnless(operator.lshift(5, 1) == 10)
self.failUnless(operator.lshift(5, 0) == 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
self.failUnlessRaises(TypeError, operator.mod)
self.failUnlessRaises(TypeError, operator.mod, None, 42)
self.failUnless(operator.mod(5, 2) == 1)
def test_mul(self):
self.failUnlessRaises(TypeError, operator.mul)
self.failUnlessRaises(TypeError, operator.mul, None, None)
self.failUnless(operator.mul(5, 2) == 10)
def test_neg(self):
self.failUnlessRaises(TypeError, operator.neg)
self.failUnlessRaises(TypeError, operator.neg, None)
self.failUnless(operator.neg(5) == -5)
self.failUnless(operator.neg(-5) == 5)
self.failUnless(operator.neg(0) == 0)
self.failUnless(operator.neg(-0) == 0)
def test_bitwise_or(self):
self.failUnlessRaises(TypeError, operator.or_)
self.failUnlessRaises(TypeError, operator.or_, None, None)
self.failUnless(operator.or_(0xa, 0x5) == 0xf)
def test_pos(self):
self.failUnlessRaises(TypeError, operator.pos)
self.failUnlessRaises(TypeError, operator.pos, None)
self.failUnless(operator.pos(5) == 5)
self.failUnless(operator.pos(-5) == -5)
self.failUnless(operator.pos(0) == 0)
self.failUnless(operator.pos(-0) == 0)
def test_pow(self):
self.failUnlessRaises(TypeError, operator.pow)
self.failUnlessRaises(TypeError, operator.pow, None, None)
self.failUnless(operator.pow(3,5) == 3**5)
self.failUnless(operator.__pow__(3,5) == 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_repeat(self):
a = range(3)
self.failUnlessRaises(TypeError, operator.repeat)
self.failUnlessRaises(TypeError, operator.repeat, a, None)
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == [])
a = (1, 2, 3)
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == ())
a = '123'
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == '')
a = Seq1([4, 5, 6])
self.failUnless(operator.repeat(a, 2) == [4, 5, 6, 4, 5, 6])
self.failUnless(operator.repeat(a, 1) == [4, 5, 6])
self.failUnless(operator.repeat(a, 0) == [])
a = Seq2([4, 5, 6])
self.failUnless(operator.repeat(a, 2) == [4, 5, 6, 4, 5, 6])
self.failUnless(operator.repeat(a, 1) == [4, 5, 6])
self.failUnless(operator.repeat(a, 0) == [])
self.failUnlessRaises(TypeError, operator.repeat, 6, 7)
def test_rshift(self):
self.failUnlessRaises(TypeError, operator.rshift)
self.failUnlessRaises(TypeError, operator.rshift, None, 42)
self.failUnless(operator.rshift(5, 1) == 2)
self.failUnless(operator.rshift(5, 0) == 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
self.assertTrue(operator.sequenceIncludes(range(4), 2))
self.assertFalse(operator.sequenceIncludes(range(4), 5))
def test_setitem(self):
a = range(3)
self.failUnlessRaises(TypeError, operator.setitem, a)
self.failUnlessRaises(TypeError, operator.setitem, a, None, None)
self.failUnless(operator.setitem(a, 0, 2) is None)
self.assert_(a == [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_setslice(self):
a = range(4)
self.failUnlessRaises(TypeError, operator.setslice, a)
self.failUnlessRaises(TypeError, operator.setslice, a, None, None, None)
self.failUnless(operator.setslice(a, 1, 3, [2, 1]) is None)
self.assert_(a == [0, 2, 1, 3])
operator.setslice(a, 0, test_support.MAX_Py_ssize_t, [])
self.assert_(a == [])
def test_sub(self):
self.failUnlessRaises(TypeError, operator.sub)
self.failUnlessRaises(TypeError, operator.sub, None, None)
self.failUnless(operator.sub(5, 2) == 3)
def test_truth(self):
class C(object):
def __nonzero__(self):
raise SyntaxError
self.failUnlessRaises(TypeError, operator.truth)
self.failUnlessRaises(SyntaxError, operator.truth, C())
self.failUnless(operator.truth(5))
self.failUnless(operator.truth([0]))
self.failIf(operator.truth(0))
self.failIf(operator.truth([]))
def test_bitwise_xor(self):
self.failUnlessRaises(TypeError, operator.xor)
self.failUnlessRaises(TypeError, operator.xor, None, None)
self.failUnless(operator.xor(0xb, 0xc) == 0x7)
def test_is(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.failUnlessRaises(TypeError, operator.is_)
self.failUnless(operator.is_(a, b))
self.failIf(operator.is_(a,c))
def test_is_not(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.failUnlessRaises(TypeError, operator.is_not)
self.failIf(operator.is_not(a, b))
self.failUnless(operator.is_not(a,c))
def test_attrgetter(self):
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter(2)
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter('x', (), 'y'), record)
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.failUnlessRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.failUnlessRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(map(getcount, inventory), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = map(str, range(20))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
def test_methodcaller(self):
self.assertRaises(TypeError, operator.methodcaller)
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEquals(f(a), 3)
f = operator.methodcaller('bar')
self.assertEquals(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEquals(f(a), 5)
def test_inplace(self):
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __idiv__ (self, other): return "idiv"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.idiv (c, 5), "idiv")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
self.assertEqual(operator.irepeat (c, 5), "imul")
self.assertEqual(operator.__iadd__ (c, 5), "iadd")
self.assertEqual(operator.__iand__ (c, 5), "iand")
self.assertEqual(operator.__idiv__ (c, 5), "idiv")
self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
self.assertEqual(operator.__ilshift__ (c, 5), "ilshift")
self.assertEqual(operator.__imod__ (c, 5), "imod")
self.assertEqual(operator.__imul__ (c, 5), "imul")
self.assertEqual(operator.__ior__ (c, 5), "ior")
self.assertEqual(operator.__ipow__ (c, 5), "ipow")
self.assertEqual(operator.__irshift__ (c, 5), "irshift")
self.assertEqual(operator.__isub__ (c, 5), "isub")
self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
self.assertEqual(operator.__ixor__ (c, 5), "ixor")
self.assertEqual(operator.__iconcat__ (c, c), "iadd")
self.assertEqual(operator.__irepeat__ (c, 5), "imul")
def test_main(verbose=None):
import sys
test_classes = (
OperatorTestCase,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
eigenn/flaskengine
|
refs/heads/master
|
tests/views/test_edit.py
|
1
|
from .base import BaseTest, TestModel, BpAppRegister, test_bp
from flaskengine import ModelEdit
class EditTestView(ModelEdit):
admin = False
model = TestModel
EditTestView.register_bp(test_bp)
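# register_bp wires the edit view's routes onto the blueprint; the tests
# below hit them as /test/<urlsafe key>/edit/.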
class TestEditModel(BaseTest):
def setUp(self):
super(TestEditModel, self).setUp()
BpAppRegister(test_bp, self.app)
dummy_data = [('fff', 'ddd'), ('aaa', 'bbb')]
self._generate_data(dummy_data)
def test_form_rendering(self):
"""
TEST MODEL EDIT: test form rendering.
"""
entity = TestModel.query().get()
response = self.client.get('/test/%s/edit/' % entity.key.urlsafe())
self.assert_200(response)
form = self.get_context_variable('form')
for field in form:
entity_values = getattr(entity, field.id)
self.assertEqual(field.data, entity_values)
def test_model_form_editing(self):
"""
TEST MODEL EDIT: test save on post
"""
entity = TestModel.query().get()
form_data = {'test_val_1': 'test', 'test_val_2': 'test'}
response = self.client.post('/test/%s/edit/' % entity.key.urlsafe(),
data=form_data)
self.assert_200(response)
self.assertEqual(form_data, entity.to_dict())
def test_entity_does_not_exist(self):
"""
        TEST MODEL EDIT: test with an invalid key
"""
response = self.client.post('/test/%s/edit/' % 'idontexist')
self.assertStatus(response, 405)
|
vwvww/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/py/testing/io_/test_capture.py
|
55
|
from __future__ import with_statement
import os, sys
import py
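# needsdup marks tests that require os.dup(); they are skipped on
# platforms whose os module lacks it.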
needsdup = py.test.mark.skipif("not hasattr(os, 'dup')")
from py.builtin import print_
if sys.version_info >= (3,0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
class TestTextIO:
def test_text(self):
f = py.io.TextIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = py.io.TextIO()
if sys.version_info >= (3,0):
f.write("\u00f6")
py.test.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
py.test.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from py._io.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
py.test.raises(IOError, f.read)
py.test.raises(IOError, f.readlines)
py.test.raises(IOError, iter, f)
py.test.raises(ValueError, f.fileno)
f.close() # just for completeness
def pytest_funcarg__tmpfile(request):
testdir = request.getfuncargvalue("testdir")
f = testdir.makepyfile("").open('wb+')
request.addfinalizer(f.close)
return f
@needsdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = py.io.dupfile(tmpfile, encoding="utf-8")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print_(i, end="", file=nf)
flist.append(nf)
for i in range(5):
f = flist[i]
f.close()
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
def test_dupfile_no_mode():
"""
dupfile should trap an AttributeError and return f if no mode is supplied.
"""
class SomeFileWrapper(object):
"An object with a fileno method but no mode attribute"
def fileno(self):
return 1
tmpfile = SomeFileWrapper()
assert py.io.dupfile(tmpfile) is tmpfile
with py.test.raises(AttributeError):
py.io.dupfile(tmpfile, raising=True)
def lsof_check(func):
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except py.process.cmdexec.Error:
py.test.skip("could not run 'lsof'")
func()
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsdup
def test_not_now(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd, now=False)
data = tobytes("hello")
os.write(fd, data)
f = cap.done()
s = f.read()
assert not s
cap = py.io.FDCapture(fd, now=False)
cap.start()
os.write(fd, data)
f = cap.done()
s = f.read()
assert s == "hello"
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
f = cap.done()
s = f.read()
assert s == "hello"
f.close()
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, tmpfile):
lsof_check(lambda: self.test_simple_many(tmpfile))
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd)
f = cap.done()
py.test.raises(ValueError, cap.start)
f.close()
def test_stderr(self):
cap = py.io.FDCapture(2, patchsys=True)
print_("hello", file=sys.stderr)
f = cap.done()
s = f.read()
assert s == "hello\n"
def test_stdin(self, tmpfile):
tmpfile.write(tobytes("3"))
tmpfile.seek(0)
cap = py.io.FDCapture(0, tmpfile=tmpfile)
# check with os.read() directly instead of raw_input(), because
# sys.stdin itself may be redirected (as py.test now does by default)
x = os.read(0, 100).strip()
f = cap.done()
assert x == tobytes("3")
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
try:
cap = py.io.FDCapture(tmpfile.fileno())
tmpfile.write(data1)
cap.writeorg(data2)
finally:
tmpfile.close()
f = cap.done()
scap = f.read()
assert scap == totext(data1)
stmp = open(tmpfile.name, 'rb').read()
assert stmp == data2
class TestStdCapture:
def getcapture(self, **kw):
return py.io.StdCapture(**kw)
def test_capturing_done_simple(self):
cap = self.getcapture()
sys.stdout.write("hello")
sys.stderr.write("world")
outfile, errfile = cap.done()
s = outfile.read()
assert s == "hello"
s = errfile.read()
assert s == "world"
def test_capturing_reset_simple(self):
cap = self.getcapture()
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.reset()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
cap = self.getcapture()
try:
print ("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
finally:
out, err = cap.reset()
assert err == "error2"
def test_capturing_readouterr_unicode(self):
cap = self.getcapture()
print ("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@py.test.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
cap = self.getcapture()
        # triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_capturing_mixed(self):
cap = self.getcapture(mixed=True)
sys.stdout.write("hello ")
sys.stderr.write("world")
sys.stdout.write(".")
out, err = cap.reset()
assert out.strip() == "hello world."
assert not err
def test_reset_twice_error(self):
cap = self.getcapture()
print ("hello")
out, err = cap.reset()
py.test.raises(ValueError, cap.reset)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
cap = self.getcapture()
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = py.io.TextIO()
sys.stderr = py.io.TextIO()
print ("not seen")
sys.stderr.write("not seen\n")
out, err = cap.reset()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
cap1 = self.getcapture()
print ("cap1")
cap2 = self.getcapture()
print ("cap2")
out2, err2 = cap2.reset()
out1, err1 = cap1.reset()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
cap = self.getcapture(out=True, err=False)
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.reset()
assert out == "hello"
assert not err
def test_just_err_capture(self):
cap = self.getcapture(out=False, err=True)
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.reset()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
cap = self.getcapture(in_=True)
newstdin = sys.stdin
out, err = cap.reset()
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print ("XXX this test may well hang instead of crashing")
print ("XXX which indicates an error in the underlying capturing")
print ("XXX mechanisms")
cap = self.getcapture()
py.test.raises(IOError, "sys.stdin.read()")
out, err = cap.reset()
def test_suspend_resume(self):
cap = self.getcapture(out=True, err=False, in_=False)
try:
print ("hello")
sys.stderr.write("error\n")
out, err = cap.suspend()
assert out == "hello\n"
assert not err
print ("in between")
sys.stderr.write("in between\n")
cap.resume()
print ("after")
sys.stderr.write("error_after\n")
finally:
out, err = cap.reset()
assert out == "after\n"
assert not err
class TestStdCaptureNotNow(TestStdCapture):
def getcapture(self, **kw):
kw['now'] = False
cap = py.io.StdCapture(**kw)
cap.startall()
return cap
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsdup
def getcapture(self, **kw):
return py.io.StdCaptureFD(**kw)
def test_intermingling(self):
cap = self.getcapture()
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.reset()
assert out == "123"
assert err == "abc"
def test_callcapture(self):
def func(x, y):
print (x)
sys.stderr.write(str(y))
return 42
res, out, err = py.io.StdCaptureFD.call(func, 3, y=4)
assert res == 42
assert out.startswith("3")
assert err.startswith("4")
def test_many(self, capfd):
def f():
for i in range(10):
cap = py.io.StdCaptureFD()
cap.reset()
lsof_check(f)
class TestStdCaptureFDNotNow(TestStdCaptureFD):
pytestmark = needsdup
def getcapture(self, **kw):
kw['now'] = False
cap = py.io.StdCaptureFD(**kw)
cap.startall()
return cap
@needsdup
def test_stdcapture_fd_tmpfile(tmpfile):
capfd = py.io.StdCaptureFD(out=tmpfile)
os.write(1, "hello".encode("ascii"))
os.write(2, "world".encode("ascii"))
outf, errf = capfd.done()
assert outf == tmpfile
class TestStdCaptureFDinvalidFD:
pytestmark = needsdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import py, os
def test_stdout():
os.close(1)
cap = py.io.StdCaptureFD(out=True, err=False, in_=False)
cap.done()
def test_stderr():
os.close(2)
cap = py.io.StdCaptureFD(out=False, err=True, in_=False)
cap.done()
def test_stdin():
os.close(0)
cap = py.io.StdCaptureFD(out=False, err=False, in_=True)
cap.done()
""")
result = testdir.runpytest("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = py.io.StdCapture(now=False)
capsys.done()
capsys.done()
capsys.reset()
@needsdup
def test_capture_no_sys():
capsys = py.io.StdCapture()
try:
cap = py.io.StdCaptureFD(patchsys=False)
sys.stdout.write("hello")
sys.stderr.write("world")
oswritebytes(1, "1")
oswritebytes(2, "2")
out, err = cap.reset()
assert out == "1"
assert err == "2"
finally:
capsys.reset()
@needsdup
def test_callcapture_nofd():
def func(x, y):
oswritebytes(1, "hello")
oswritebytes(2, "hello")
print (x)
sys.stderr.write(str(y))
return 42
capfd = py.io.StdCaptureFD(patchsys=False)
try:
res, out, err = py.io.StdCapture.call(func, 3, y=4)
finally:
capfd.reset()
assert res == 42
assert out.startswith("3")
assert err.startswith("4")
@needsdup
@py.test.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = py.io.StdCaptureFD(out=False, err=tmpfile, now=False)
cap.startall()
capfile = cap.err.tmpfile
cap.suspend()
cap.resume()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@py.test.mark.parametrize('method', ['StdCapture', 'StdCaptureFD'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
py.test.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
cap = py.io.%s(out=False, in_=False)
logging.warn("hello1")
outerr = cap.suspend()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.resume()
logging.warn("hello3")
outerr = cap.suspend()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"suspend, captured*hello1*",
"suspend2, captured*hello2*WARNING:root:hello3*",
])
assert "atexit" not in result.stderr.str()
|
TheMOOCAgency/edx-platform
|
refs/heads/master
|
lms/djangoapps/shoppingcart/exceptions.py
|
191
|
"""
Exceptions for the shoppingcart app
"""
# (Exception Class Names are sort of self-explanatory, so skipping docstring requirement)
# pylint: disable=missing-docstring
class PaymentException(Exception):
pass
class PurchasedCallbackException(PaymentException):
pass
class InvalidCartItem(PaymentException):
pass
class ItemAlreadyInCartException(InvalidCartItem):
pass
class AlreadyEnrolledInCourseException(InvalidCartItem):
pass
class CourseDoesNotExistException(InvalidCartItem):
pass
class CouponDoesNotExistException(InvalidCartItem):
pass
class MultipleCouponsNotAllowedException(InvalidCartItem):
pass
class RedemptionCodeError(Exception):
"""An error occurs while processing redemption codes. """
pass
class ReportException(Exception):
pass
class ReportTypeDoesNotExistException(ReportException):
pass
class InvalidStatusToRetire(Exception):
pass
class UnexpectedOrderItemStatus(Exception):
pass
class ItemNotFoundInCartException(Exception):
pass
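# Illustrative sketch (add_item_to_cart and log are hypothetical): because the
# classes above form a hierarchy, callers can catch at the granularity needed.
#
#   try:
#       add_item_to_cart(item)
#   except ItemAlreadyInCartException:
#       pass                                  # treat a repeated add as a no-op
#   except InvalidCartItem as exc:
#       log.warning("bad cart item: %s", exc) # covers the subclasses above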
|
LuminateWireless/grpc
|
refs/heads/master
|
test/core/http/test_server.py
|
30
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Server for httpcli_test"""
import argparse
import BaseHTTPServer
import os
import ssl
import sys
_PEM = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/lib/tsi/test_creds/server1.pem'))
_KEY = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/lib/tsi/test_creds/server1.key'))
print _PEM
open(_PEM).close()
argp = argparse.ArgumentParser(description='Server for httpcli_test')
argp.add_argument('-p', '--port', default=10080, type=int)
argp.add_argument('-s', '--ssl', default=False, action='store_true')
args = argp.parse_args()
print 'server running on port %d' % args.port
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def good(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>Hello world!</title></head>')
self.wfile.write('<body><p>This is a test</p></body></html>')
def do_GET(self):
if self.path == '/get':
self.good()
def do_POST(self):
content = self.rfile.read(int(self.headers.getheader('content-length')))
if self.path == '/post' and content == 'hello':
self.good()
httpd = BaseHTTPServer.HTTPServer(('localhost', args.port), Handler)
if args.ssl:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=_PEM, keyfile=_KEY, server_side=True)
httpd.serve_forever()
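# Illustrative manual checks against the running server (default port assumed):
#   curl http://localhost:10080/get
#   curl -d 'hello' http://localhost:10080/post
# Both requests should receive the "Hello world!" test page written by good().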
|
imaculate/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/nearest_centroid.py
|
34
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
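# Illustrative sketch (the threshold value is arbitrary, not a recommendation):
# with metric='manhattan' the per-class medians become the centroids, and a
# shrink_threshold soft-thresholds them towards the overall data centroid.
#
#   clf = NearestCentroid(metric='manhattan', shrink_threshold=0.5)
#   clf.fit(X_dense, y)   # shrinking requires dense input, see fit() above
#   clf.predict(X_new)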
|
gnmiller/craig-bot
|
refs/heads/master
|
craig-bot/lib/python3.6/site-packages/pyasn1_modules/rfc1905.py
|
11
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# SNMPv2c PDU syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1905.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1_modules import rfc1902
max_bindings = rfc1902.Integer(2147483647)
class _BindValue(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('value', rfc1902.ObjectSyntax()),
namedtype.NamedType('unSpecified', univ.Null()),
namedtype.NamedType('noSuchObject',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('noSuchInstance',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('endOfMibView',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1902.ObjectName()),
namedtype.NamedType('', _BindValue())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(
0, max_bindings
)
class PDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', rfc1902.Integer32()),
namedtype.NamedType('error-status', univ.Integer(
namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3),
('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7),
('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10),
('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13),
('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16),
('notWritable', 17), ('inconsistentName', 18)))),
namedtype.NamedType('error-index',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('variable-bindings', VarBindList())
)
class BulkPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', rfc1902.Integer32()),
namedtype.NamedType('non-repeaters',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('max-repetitions',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class ResponsePDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class GetBulkRequestPDU(BulkPDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
)
class InformRequestPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
)
class SNMPv2TrapPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
)
class ReportPDU(PDU):
tagSet = PDU.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
)
class PDUs(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
namedtype.NamedType('response', ResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('inform-request', InformRequestPDU()),
namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
namedtype.NamedType('report', ReportPDU())
)
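# Minimal usage sketch, assuming pyasn1's BER codec (all values arbitrary):
#
#   from pyasn1.codec.ber import encoder
#   pdu = GetRequestPDU()
#   pdu.setComponentByName('request-id', 1)
#   pdu.setComponentByName('error-status', 'noError')
#   pdu.setComponentByName('error-index', 0)
#   pdu.setComponentByName('variable-bindings', VarBindList())
#   substrate = encoder.encode(pdu)   # BER-encoded SNMPv2 GetRequest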
|
Yen-Chung-En/2015cdb_g1_0623-2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/errno.py
|
624
|
"""
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
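# Illustrative reverse lookup using the table above:
#   errorcode[ENOENT]   -> 'ENOENT'
#   import os; os.strerror(ENOENT) could return 'No such file or directory'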
|
trabacus-softapps/openerp-8.0-cc
|
refs/heads/master
|
openerp/addons/account_asset/report/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gavrieltal/opencog
|
refs/heads/master
|
opencog/nlp/anaphora/agents/testingAgent.py
|
11
|
from __future__ import print_function
from pprint import pprint
from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
from hobbs import HobbsAgent
from dumpAgent import dumpAgent
from opencog.scheme_wrapper import load_scm,scheme_eval_h, __init__
__author__ = 'Hujie Wang'
'''
This agent is purely for testing purposes; it can be used to test the HobbsAgent in a standalone atomspace environment.
'''
atomspace = AtomSpace()
__init__(atomspace)
data=["opencog/scm/config.scm",
"opencog/scm/core_types.scm",
"spacetime/spacetime_types.scm",
"opencog/nlp/types/nlp_types.scm",
"opencog/dynamics/attention/attention_types.scm",
"opencog/embodiment/AtomSpaceExtensions/embodiment_types.scm",
"opencog/scm/apply.scm",
"opencog/scm/file-utils.scm",
"opencog/scm/persistence.scm",
#"opencog/scm/repl-shell.scm",
"opencog/scm/utilities.scm",
"opencog/scm/av-tv.scm",
"opencog/nlp/scm/type-definitions.scm",
"opencog/nlp/scm/config.scm",
"opencog/nlp/scm/file-utils.scm",
"opencog/nlp/scm/nlp-utils.scm",
"opencog/nlp/scm/disjunct-list.scm",
"opencog/nlp/scm/processing-utils.scm",
"opencog/nlp/anaphora/tests/atomspace.log"
]
#status2 = load_scm(atomspace, "opencog/nlp/anaphora/tests/atomspace.scm")
for item in data:
load_scm(atomspace, item)
#init=initAgent()
#init.run(atomspace)
dump=dumpAgent()
dump.run(atomspace)
hobbsAgent = HobbsAgent()
hobbsAgent.run(atomspace)
|
acsone/hr
|
refs/heads/8.0
|
hr_expense_sequence/__openerp__.py
|
13
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Odoo Source Management Solution
# Copyright (c) 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR expense sequence',
'version': '8.0.1.0.0',
'category': 'HR',
'author': "Serv. Tecnol. Avanzados - Pedro M. Baeza,"
"Odoo Community Association (OCA)",
'website': 'http://www.serviciosbaeza.com',
'depends': [
'hr_expense',
],
'data': [
'data/hr_expense_data.xml',
'views/hr_expense_expense_view.xml',
],
"installable": True,
"post_init_hook": "assign_old_sequences",
}
|
HeinAtCERN/MyUtility
|
refs/heads/master
|
PythonUtil/python/utility.py
|
2
|
import FWCore.ParameterSet.Config as cms
def make_histo_analyzer(
src,
plot_quantity,
n_bins,
low=0.,
high=0.,
x_label="",
y_label="number of candidates",
weights=""
):
"""tokens: (low, high, n_bins, x_label, y_label)"""
if not x_label:
x_label = plot_quantity
histo_analyzer = cms.EDAnalyzer(
"CandViewHistoAnalyzer",
src=cms.InputTag(src),
histograms=cms.VPSet(
cms.PSet(
lazyParsing=cms.untracked.bool(True),
min=cms.untracked.double(low),
max=cms.untracked.double(high),
nbins=cms.untracked.int32(n_bins),
name=cms.untracked.string("histo"),
description=cms.untracked.string(
";" + x_label + ";" + y_label
),
plotquantity=cms.untracked.string(plot_quantity),
)
)
)
if weights:
histo_analyzer.histograms[0].weights = cms.untracked.InputTag(weights)
return histo_analyzer
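# Hypothetical usage sketch (collection and quantity names are illustrative):
#   pt_histo = make_histo_analyzer("goodMuons", "pt", 50, 0., 200.,
#                                  x_label="muon p_{T} [GeV]")
#   process.muonPtAnalyzer = pt_histo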
|
bepitulaz/huntingdimana
|
refs/heads/master
|
env/Lib/site-packages/pip/_vendor/distlib/version.py
|
426
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
      1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
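# Illustrative: _match_prefix('1.4.5', '1.4') is True, while
# _match_prefix('1.45', '1.4') is False -- only whole release segments
# may follow the matched prefix.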
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
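# Illustrative sketch of the matcher above (constraint string is arbitrary):
#   m = NormalizedMatcher('foo (>= 1.0, < 2.0)')
#   m.match('1.4')   -> True
#   m.match('2.0')   -> False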
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
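# Illustrative (hand-traced through the replacements above):
#   _suggest_semantic_version('v1.0 (beta)') -> '1.0.0+beta'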
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
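# Illustrative (consistent with the inline examples above):
#   _suggest_normalized_version('1.0-dev-r371') -> '1.0.dev371'
#   _suggest_normalized_version('1.0')          -> '1.0'  (already rational)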
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
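# Illustrative ordering under the legacy key (pre-releases sort first):
#   _legacy_key('1.0a1') < _legacy_key('1.0')  -> True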
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
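# Usage sketch:
#   scheme = get_scheme('normalized')
#   scheme.is_valid_version('1.0.post1')                      -> True
#   NormalizedVersion('1.0') < NormalizedVersion('1.0.post1') -> True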
|
jhseu/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/distribute/keras_premade_models_test.py
|
1
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras premade models using tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.premade import linear
from tensorflow.python.keras.premade import wide_deep
def strategy_combinations_eager_data_fn():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['eager'],
data_fn=[get_numpy, get_dataset])
def get_numpy():
inputs = np.random.uniform(low=-5, high=5, size=(64, 2)).astype(np.float32)
output = .3 * inputs[:, 0] + .2 * inputs[:, 1]
return inputs, output
def get_dataset():
inputs, output = get_numpy()
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, output))
dataset = dataset.batch(10).repeat(10)
return dataset
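# For reference (shapes implied by get_numpy above): inputs are (64, 2)
# float32 samples, targets have shape (64,), and get_dataset() batches
# them into batches of 10, repeated 10 times.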
class KerasPremadeModelsTest(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_combinations_eager_data_fn())
def test_linear_model(self, distribution, data_fn):
with distribution.scope():
model = linear.LinearModel()
opt = gradient_descent.SGD(learning_rate=0.1)
model.compile(opt, 'mse', experimental_run_tf_function=True)
if data_fn == get_numpy:
inputs, output = get_numpy()
hist = model.fit(inputs, output, epochs=5)
else:
hist = model.fit(get_dataset(), epochs=5)
self.assertLess(hist.history['loss'][4], 0.2)
@combinations.generate(strategy_combinations_eager_data_fn())
def test_wide_deep_model(self, distribution, data_fn):
with distribution.scope():
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_opt = gradient_descent.SGD(learning_rate=0.05)
dnn_opt = adagrad.Adagrad(learning_rate=0.1)
wide_deep_model.compile(
optimizer=[linear_opt, dnn_opt],
loss='mse',
experimental_run_tf_function=True)
if data_fn == get_numpy:
inputs, output = get_numpy()
hist = wide_deep_model.fit(inputs, output, epochs=5)
else:
hist = wide_deep_model.fit(get_dataset(), epochs=5)
self.assertLess(hist.history['loss'][4], 0.2)
if __name__ == '__main__':
test.main()
|
gmsn-ita/vaspirin
|
refs/heads/master
|
vaspirin/procar.py
|
2
|
import sys, os, shutil
from . import projection
class PROCAR (object):
'''
Deals with PROCAR-related information, such as band composition, projection
onto orbitals and atomic sites. The PROCAR file should be passed as input to this class.
	The PROCAR class cannot deal with very large (~ GB) PROCAR files, since a memory error
	happens in this case. To work around this, another class named PROCAR_splitter creates the .dat file directly from a large PROCAR file.
'''
def __init__ (self, fProcar, projection, nKPTignore = 0):
self.nKPTignore = nKPTignore
"""
Number of k-points to be ignored
"""
self.nKpoints,self.nBands,self.nIons = self.readHeader (fProcar)
"""
Number of k-points, bands and ions in the system
"""
self.orbitalContributions = self.readOrbitalContribution (fProcar)
"""
Reads the composition of the bands, for each k-point, projected onto atomic orbitals
"""
self.ionContributions = self.readIonContribution (fProcar)
"""
Reads the composition of the bands, for each k-point, projected onto atomic sites
"""
self.prj = projection
"""
PROJECTION information
"""
self.materialContributions = []
"""
For projecting bands onto groups of atomic sites (materials)
"""
self.sumContributions()
def readHeader (self,fProcar):
'''
Reads the number of k-points, bands and ions in the simulation from
the header of the PROCAR file. Uses only the first two lines.
'''
try:
with open(fProcar,'r') as f:
# 1st line: comment
f.readline()
# 2nd line: important information!
header = f.readline().strip()
except FileNotFoundError:
print ("PROCAR file not found! Exiting...\n")
sys.exit (1)
# header read!
nkpt = int(header.split(':')[1].split()[0])
nbands = int(header.split(':')[2].split()[0])
nions = int(header.split(':')[3].split()[0])
return nkpt,nbands,nions
def readOrbitalContribution (self,fProcar):
"""
Creates a matrix containing the contribution of each orbital.
"""
try:
with open(fProcar,'r') as fileIn:
procar = fileIn.read()
except FileNotFoundError:
print ("PROCAR file not found! Exiting...\n")
sys.exit (1)
## contributions[kpoint][band] returns the list [s,px+py,pz,d]
contributions = []
kptBlock = procar.split('k-point')
## The first two blocks are the header, thus should be ignored
## Loops over each k-point
for k in range (2 + self.nKPTignore,len(kptBlock)):
contributions.append([])
## Splits the k-point block into bands
bands = kptBlock[k].split('band')
## Loops over each band, ignoring the first block (the header)
for j in range (1,self.nBands+1):
contributions[k - (self.nKPTignore + 2)].append([])
lines = bands[j].split('\n')
				## The line at index 3+self.nIons holds the total contribution in terms
				## of atomic orbitals; it is the last line of the block being parsed
totCont = float(lines[3+self.nIons].split()[10])
if totCont > 0:
sCont = float(lines[3+self.nIons].split()[1])/totCont
pyCont = float(lines[3+self.nIons].split()[2])/totCont
pzCont = float(lines[3+self.nIons].split()[3])/totCont
pxCont = float(lines[3+self.nIons].split()[4])/totCont
dxyCont = float(lines[3+self.nIons].split()[5])/totCont
dyzCont = float(lines[3+self.nIons].split()[6])/totCont
dz2Cont = float(lines[3+self.nIons].split()[7])/totCont
dxzCont = float(lines[3+self.nIons].split()[8])/totCont
dx2Cont = float(lines[3+self.nIons].split()[9])/totCont
contributions[k-self.nKPTignore-2][j-1].extend([sCont, pyCont + pxCont, pzCont, dxyCont + dyzCont + dz2Cont + dxzCont + dx2Cont])
## Dangerous part of the code:
## To tweak the contributions as wanted
## Implement later on this on a script...
#~ contributions[k-self.nKPTignore-2][j-1].extend([4*dz2Cont, 0, 4*dxzCont, 0])
else:
contributions[k-self.nKPTignore-2][j-1].extend([0,0,0,0])
return contributions
def readIonContribution (self,fProcar):
"""
Reads the relative contribution of all ions to the formation of the band, for each k-point.
Allows to study the character of the band.
"""
try:
with open(fProcar,'r') as fileIn:
procar = fileIn.read()
except FileNotFoundError:
print ("PROCAR file not found! Exiting...\n")
sys.exit (1)
## contributions[k-point][band][ion]
contributions = []
kptBlock = procar.split('k-point')
## Loops over each k-point, ignoring the header and the first k-points (if applicable)
for k in range (2 + self.nKPTignore,len(kptBlock)):
contributions.append([])
bands = kptBlock[k].split('band')
## Now loops over each band
for j in range (1,self.nBands+1):
contributions[k-self.nKPTignore-2].append([])
lines = bands[j].split('\n')
## Total contribution for the specified k-point and band
totCont = float(lines[3+self.nIons].split()[10])
if totCont > 0:
ionsContributionsThisBand = []
## Loops over all ions to get their contribution to the band
for i in range(self.nIons):
## The first ion is seen in lines[3] and the block index 10 (11th column) is the ionic contribution to the system
thisIonContribution = float(lines[3+i].split()[10])/totCont
ionsContributionsThisBand.append(thisIonContribution)
## Saves the information and goes on to a new band
contributions[k-self.nKPTignore-2][j-1].extend(ionsContributionsThisBand)
else:
contributions[k-self.nKPTignore-2][j-1].extend([0]*self.nIons)
return contributions
def sumContributions (self):
"""
Sum the contributions from the ions into N materials based on the list ionsVsMaterials.
The list ionsVsMaterials simply labels the ions to be summed.
"""
## Variable to store the contributions of the N materials
## projectedContribution [k-point][band][material in index form]
projectedContributions = []
## Loops over each k-point not ignored
for kpt in range(self.nKpoints - self.nKPTignore):
projectedContributions.append ([])
for band in range (self.nBands):
projectedContributions[kpt].append ([])
projectedContributions[kpt][band].extend ([0]*len(self.prj.dictMaterials))
for eachIon in range(self.nIons):
## Variable which groups ions pertaining to the same material
ionLabel = self.prj.dictMaterials.get(self.prj.ionsVsMaterials[eachIon])
## Sums contribution of ions labeled together
projectedContributions[kpt][band][ionLabel] += self.ionContributions[kpt][band][eachIon]
self.materialContributions = projectedContributions
return
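# A minimal usage sketch (illustrative only; 'PROCAR' is a hypothetical path
# and FakeProjection is a stand-in for the real PROJECTION object, which must
# expose dictMaterials, mapping material name -> index, and ionsVsMaterials,
# one material label per ion):
#
# class FakeProjection(object):
#     dictMaterials = {'substrate': 0, 'adsorbate': 1}
#     ionsVsMaterials = ['substrate'] * 4 + ['adsorbate'] * 2
#
# procar = PROCAR('PROCAR', FakeProjection())
# print(procar.nKpoints, procar.nBands, procar.nIons)
# # Per-material weights of the first band at the first k-point:
# print(procar.materialContributions[0][0])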
|
andrewnc/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/__init__.py
|
268
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
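# Example (illustrative): StandardScaler standardizes each feature to zero
# mean and unit variance; fit_transform learns the statistics and applies
# them in one step.
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> X = np.array([[1., 10.], [2., 20.], [3., 30.]])
#     >>> StandardScaler().fit_transform(X).mean(axis=0)
#     array([ 0.,  0.])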
|
msebire/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/tests/geo3d/views.py
|
6027
|
# Create your views here.
|
chaomodus/pixywerk
|
refs/heads/master
|
pixywerk/__init__.py
|
1
|
"""PixyWerk - a framework for serving metadata-rich, templated files for the web."""
from . import simpleconfig
from . import utils
from . import werk
from . import wsgi
from .version import *
|
fellchase/4chan-media-downloader
|
refs/heads/master
|
bs4/tests/test_docs.py
|
607
|
"Test harness for doctests."
# pylint: disable-msg=E0611,W0142
__metaclass__ = type
__all__ = [
'additional_tests',
]
import atexit
import doctest
import os
#from pkg_resources import (
# resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest
DOCTEST_FLAGS = (
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF)
# def additional_tests():
# "Run the doc tests (README.txt and docs/*, if any exist)"
# doctest_files = [
# os.path.abspath(resource_filename('bs4', 'README.txt'))]
# if resource_exists('bs4', 'docs'):
# for name in resource_listdir('bs4', 'docs'):
# if name.endswith('.txt'):
# doctest_files.append(
# os.path.abspath(
# resource_filename('bs4', 'docs/%s' % name)))
# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
# atexit.register(cleanup_resources)
# return unittest.TestSuite((
# doctest.DocFileSuite(*doctest_files, **kwargs)))
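# A minimal working stand-in for the disabled pkg_resources version above, so
# that the 'additional_tests' name exported in __all__ actually exists. The
# README location is an assumption about the package layout.
def additional_tests():
    "Run the doc tests (README.txt, if it exists)."
    readme = os.path.join(os.path.dirname(__file__), '..', 'README.txt')
    suite = unittest.TestSuite()
    if os.path.exists(readme):
        suite.addTest(doctest.DocFileSuite(
            readme, module_relative=False, optionflags=DOCTEST_FLAGS))
    return suite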
|
yannrouillard/weboob
|
refs/heads/master
|
modules/taz/pages/article.py
|
11
|
"ArticlePage object for Taz newspaper"
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage,\
try_drop_tree, clean_relativ_urls
class ArticlePage(GenericNewsPage):
"ArticlePage object for taz"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_title_selector = "title"
self.element_author_selector = ".content-author>a"
def get_body(self):
div = self.document.getroot().find('.//div[@class="sectbody"]')
try_drop_tree(self.parser, div, "div.anchor")
clean_relativ_urls(div, "http://taz.de")
return self.parser.tostring(div)
def get_title(self):
title = GenericNewsPage.get_title(self)
return title
def get_author(self):
author = self.document.getroot().xpath('//span[@class="author"]')
if author:
return author[0].text.replace('von ', '')
|
carze/cutlass
|
refs/heads/master
|
examples/host_seq_prep.py
|
2
|
#!/usr/bin/env python
# pylint: disable=C0111, C0325
import logging
import sys
from pprint import pprint
from cutlass import HostSeqPrep
from cutlass import iHMPSession
username = "test"
password = "test"
def set_logging():
""" Setup logging. """
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
set_logging()
session = iHMPSession(username, password)
mims = {
"adapters": "test",
"annot_source": "test",
"assembly": "test",
"assembly_name": "test",
"biome": "test",
"collection_date": "test",
"env_package": "test",
"extrachrom_elements": "test",
"encoded_traits": "test",
"experimental_factor": "test",
"feature": "test",
"findex": "test",
"finishing_strategy": "test",
"geo_loc_name": "test",
"investigation_type": "test",
"lat_lon": "test",
"lib_const_meth": "test",
"lib_reads_seqd": "test",
"lib_screen": "test",
"lib_size": 2000,
"lib_vector": "test",
"material": "test",
"nucl_acid_amp": "test",
"nucl_acid_ext": "test",
"project_name": "test",
"rel_to_oxygen": "test",
"rindex": "test",
"samp_collect_device": "test",
"samp_mat_process": "test",
"samp_size": "test",
"seq_meth": "test",
"sop": ["a", "b", "c"],
"source_mat_id": ["a", "b", "c"],
"submitted_to_insdc": True,
"url": ["a", "b", "c"]
}
print("Required fields: ")
print(HostSeqPrep.required_fields())
test_prep = HostSeqPrep()
# Required
test_prep.comment = "test comment. Hello world!"
test_prep.lib_layout = "test lib_layout"
test_prep.lib_selection = "test lib_selection"
test_prep.sequencing_center = "test center"
test_prep.sequencing_contact = "test contact"
test_prep.storage_duration = 3
test_prep.ncbi_taxon_id = "NCBI123ABC"
test_prep.prep_id = "test prep id"
test_prep.links = {"prepared_from": ["610a4911a5ca67de12cdc1e4b4011876"]}
test_prep.tags = ["test", "host_seq_prep", "ihmp"]
test_prep.add_tag("another")
test_prep.add_tag("and_another")
# Optional
test_prep.adapters = "test adapters"
test_prep.experimental_factor = "test exp factor"
test_prep.frag_size = 2
test_prep.lib_const_meth = "test lib_const_meth"
test_prep.lib_size = 1313
test_prep.mims = mims
test_prep.nucl_acid_amp = "test nucl_acid_amp"
test_prep.nucl_acid_ext = "test nucl_acid_ext"
print(test_prep.to_json(indent=2))
if test_prep.is_valid():
print("Valid!")
success = test_prep.save()
if success:
prep_id = test_prep.id
print("Successfully saved prep. ID: %s" % prep_id)
prep2 = test_prep.load(prep_id)
print(prep2.to_json(indent=4))
deletion_success = test_prep.delete()
if deletion_success:
print("Deleted prep with ID %s" % prep_id)
else:
print("Deletion of prep %s failed." % prep_id)
else:
print("Save failed")
else:
print("Invalid...")
validation_errors = test_prep.validate()
pprint(validation_errors)
|
brentn/plover
|
refs/heads/master
|
plover/gui/lookup.py
|
7
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import wx
from wx.lib.utils import AdjustRectToScreen
import sys
from plover.steno import normalize_steno
import plover.gui.util as util
TITLE = 'Plover: Lookup'
class LookupDialog(wx.Dialog):
BORDER = 3
TRANSLATION_TEXT = 'Text:'
other_instances = []
def __init__(self, parent, engine, config):
pos = (config.get_lookup_frame_x(),
config.get_lookup_frame_y())
wx.Dialog.__init__(self, parent, wx.ID_ANY, TITLE,
pos, wx.DefaultSize,
wx.DEFAULT_DIALOG_STYLE, wx.DialogNameStr)
self.config = config
# components
self.translation_text = wx.TextCtrl(self, style=wx.TE_PROCESS_ENTER)
cancel = wx.Button(self, id=wx.ID_CANCEL)
self.listbox = wx.ListBox(self, size=wx.Size(210, 200))
# layout
global_sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label=self.TRANSLATION_TEXT)
sizer.Add(label,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(self.translation_text,
flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(cancel,
flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
global_sizer.Add(sizer)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.listbox,
flag=wx.ALL | wx.FIXED_MINSIZE,
border=self.BORDER)
global_sizer.Add(sizer)
self.SetAutoLayout(True)
self.SetSizer(global_sizer)
global_sizer.Fit(self)
global_sizer.SetSizeHints(self)
self.Layout()
self.SetRect(AdjustRectToScreen(self.GetRect()))
# events
# The reason for the focus event here is to skip focus on tab traversal
# of the buttons. But it seems that on windows this prevents the button
# from being pressed. Leave this commented out until that problem is
# resolved.
#button.Bind(wx.EVT_SET_FOCUS, self.on_button_gained_focus)
cancel.Bind(wx.EVT_BUTTON, self.on_close)
#cancel.Bind(wx.EVT_SET_FOCUS, self.on_button_gained_focus)
self.translation_text.Bind(wx.EVT_TEXT, self.on_translation_change)
self.translation_text.Bind(wx.EVT_SET_FOCUS, self.on_translation_gained_focus)
self.translation_text.Bind(wx.EVT_KILL_FOCUS, self.on_translation_lost_focus)
self.translation_text.Bind(wx.EVT_TEXT_ENTER, self.on_close)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_MOVE, self.on_move)
self.engine = engine
# TODO: add functions on engine for state
self.previous_state = self.engine.translator.get_state()
# TODO: use state constructor?
self.engine.translator.clear_state()
self.translation_state = self.engine.translator.get_state()
self.engine.translator.set_state(self.previous_state)
self.last_window = util.GetForegroundWindow()
# Now that we saved the last window we'll close other instances. This
# may restore their original window but we've already saved ours so it's
# fine.
for instance in self.other_instances:
instance.Close()
del self.other_instances[:]
self.other_instances.append(self)
def on_close(self, event=None):
self.engine.translator.set_state(self.previous_state)
try:
util.SetForegroundWindow(self.last_window)
except:
pass
self.other_instances.remove(self)
self.Destroy()
def on_translation_change(self, event):
# TODO: normalize dict entries to make reverse lookup more reliable with
# whitespace.
translation = event.GetString().strip()
self.listbox.Clear()
if translation:
d = self.engine.get_dictionary()
strokes_list = d.reverse_lookup(translation)
if strokes_list:
entries = ('/'.join(x) for x in strokes_list)
                for entry in entries:
                    self.listbox.Append(entry)
else:
self.listbox.Append('No entries')
self.GetSizer().Layout()
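    # For example, looking up 'cat' against a dictionary entry
    # ('KAT',) -> 'cat' appends the steno string 'KAT' to the listbox, and a
    # hypothetical two-stroke outline ('KA', 'AT') would be shown as 'KA/AT'.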
def on_translation_gained_focus(self, event):
self.engine.translator.set_state(self.translation_state)
def on_translation_lost_focus(self, event):
self.engine.translator.set_state(self.previous_state)
    def on_button_gained_focus(self, event):
        # This dialog only has a translation_text field; the strokes_text
        # control referenced by the original copy does not exist here.
        self.translation_text.SetFocus()
def on_move(self, event):
pos = self.GetScreenPositionTuple()
self.config.set_lookup_frame_x(pos[0])
self.config.set_lookup_frame_y(pos[1])
event.Skip()
    def _normalized_strokes(self):
        # Normalize the content of the lookup entry field; this dialog has no
        # separate strokes_text control, so reading one would raise
        # AttributeError.
        strokes = self.translation_text.GetValue().upper().replace('/', ' ').split()
        strokes = normalize_steno('/'.join(strokes))
        return strokes
def Show(parent, engine, config):
dialog_instance = LookupDialog(parent, engine, config)
dialog_instance.Show()
dialog_instance.Raise()
dialog_instance.translation_text.SetFocus()
util.SetTopApp()
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
common/djangoapps/third_party_auth/tests/utils.py
|
19
|
"""Common utility for testing third party oauth2 features."""
import json
import httpretty
from provider.constants import PUBLIC
from provider.oauth2.models import Client
from social_core.backends.facebook import FacebookOAuth2, API_VERSION as FACEBOOK_API_VERSION
from social_django.models import UserSocialAuth, Partial
from student.tests.factories import UserFactory
from .testutil import ThirdPartyAuthTestMixin
@httpretty.activate
class ThirdPartyOAuthTestMixin(ThirdPartyAuthTestMixin):
"""
Mixin with tests for third party oauth views. A TestCase that includes
this must define the following:
BACKEND: The name of the backend from python-social-auth
USER_URL: The URL of the endpoint that the backend retrieves user data from
UID_FIELD: The field in the user data that the backend uses as the user id
"""
social_uid = "test_social_uid"
access_token = "test_access_token"
client_id = "test_client_id"
CREATE_USER = True
def setUp(self):
super(ThirdPartyOAuthTestMixin, self).setUp()
if self.CREATE_USER:
self.user = UserFactory()
UserSocialAuth.objects.create(user=self.user, provider=self.BACKEND, uid=self.social_uid)
self.oauth_client = self._create_client()
if self.BACKEND == 'google-oauth2':
self.configure_google_provider(enabled=True, visible=True)
elif self.BACKEND == 'facebook':
self.configure_facebook_provider(enabled=True, visible=True)
def tearDown(self):
super(ThirdPartyOAuthTestMixin, self).tearDown()
Partial.objects.all().delete()
def _create_client(self):
"""
Create an OAuth2 client application
"""
return Client.objects.create(
client_id=self.client_id,
client_type=PUBLIC,
)
def _setup_provider_response(self, success=False, email=''):
"""
Register a mock response for the third party user information endpoint;
success indicates whether the response status code should be 200 or 400
"""
if success:
status = 200
response = {self.UID_FIELD: self.social_uid}
if email:
response.update({'email': email})
body = json.dumps(response)
else:
status = 400
body = json.dumps({})
self._setup_provider_response_with_body(status, body)
def _setup_provider_response_with_body(self, status, body):
"""
Register a mock response for the third party user information endpoint with given status and body.
"""
httpretty.register_uri(
httpretty.GET,
self.USER_URL,
body=body,
status=status,
content_type="application/json",
)
class ThirdPartyOAuthTestMixinFacebook(object):
"""Tests oauth with the Facebook backend"""
BACKEND = "facebook"
USER_URL = FacebookOAuth2.USER_DATA_URL.format(version=FACEBOOK_API_VERSION)
# In facebook responses, the "id" field is used as the user's identifier
UID_FIELD = "id"
class ThirdPartyOAuthTestMixinGoogle(object):
"""Tests oauth with the Google backend"""
BACKEND = "google-oauth2"
USER_URL = "https://www.googleapis.com/plus/v1/people/me"
# In google-oauth2 responses, the "email" field is used as the user's identifier
UID_FIELD = "email"
|
kisna72/django
|
refs/heads/master
|
tests/generic_inline_admin/admin.py
|
513
|
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import (
Category, Contact, Episode, EpisodePermanent, Media, PhoneNumber,
)
site = admin.AdminSite(name="admin")
class MediaInline(GenericTabularInline):
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline,
]
class PhoneNumberInline(GenericTabularInline):
model = PhoneNumber
class MediaPermanentInline(GenericTabularInline):
model = Media
can_delete = False
site.register(Episode, EpisodeAdmin)
site.register(Contact, inlines=[PhoneNumberInline])
site.register(Category)
site.register(EpisodePermanent, inlines=[MediaPermanentInline])
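# Note: keyword options passed to AdminSite.register() (such as inlines above)
# become attributes of a ModelAdmin subclass created on the fly, so
# site.register(Contact, inlines=[PhoneNumberInline]) is equivalent to
# registering a ModelAdmin whose inlines list contains PhoneNumberInline.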
|
richardtran415/pymatgen
|
refs/heads/master
|
pymatgen/io/abinit/tests/test_netcdf.py
|
5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.abinit import ETSF_Reader
from pymatgen.util.testing import PymatgenTest
try:
import netCDF4
except ImportError:
netCDF4 = None
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "abinit")
def ref_file(filename):
return os.path.join(_test_dir, filename)
class ETSF_Reader_TestCase(PymatgenTest):
def setUp(self):
formulas = [
"Si2",
]
self.GSR_paths = d = {}
for formula in formulas:
d[formula] = ref_file(formula + "_GSR.nc")
@unittest.skipIf(netCDF4 is None, "Requires Netcdf4")
def test_read_Si2(self):
path = self.GSR_paths["Si2"]
ref_dims = {"number_of_spins": 1}
ref_int_values = {
"space_group": 227,
"number_of_states": np.reshape([15, 15], (1, 2)),
}
ref_float_values = {
"etotal": -8.85911566912484,
"primitive_vectors": np.reshape([0, 5.125, 5.125, 5.125, 0, 5.125, 5.125, 5.125, 0], (3, 3)),
}
with ETSF_Reader(path) as data:
self.assertEqual(data.ngroups, 1)
print(data.read_varnames())
# Test dimensions.
for dimname, int_ref in ref_dims.items():
value = data.read_dimvalue(dimname)
self.assertArrayEqual(value, int_ref)
# Test int variables
for varname, int_ref in ref_int_values.items():
value = data.read_value(varname)
print(varname, value)
self.assertArrayEqual(value, int_ref)
# Test float variables
for varname, float_ref in ref_float_values.items():
value = data.read_value(varname)
print(varname, value)
self.assertArrayAlmostEqual(value, float_ref)
# assert 0
# Reading non-existent variables or dims should raise
            # a subclass of NetcdfReaderError
with self.assertRaises(data.Error):
data.read_value("foobar")
with self.assertRaises(data.Error):
data.read_dimvalue("foobar")
# Unless default is given
assert data.read_value("foobar", default=None) is None
data.print_tree()
for group in data.walk_tree():
print("group: " + str(group))
# Initialize pymatgen structure from GSR.
structure = data.read_structure()
self.assertTrue(isinstance(structure, Structure))
# Read ixc.
# TODO: Upgrade GSR file.
# xc = data.read_abinit_xcfunc()
# assert xc == "LDA"
|
nphyx/jsvolume
|
refs/heads/master
|
node_modules/jsdoc/node_modules/esprima/tools/generate-unicode-regex.py
|
260
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <utatane.tea@gmail.com>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
def __init__(self, detector):
self.detector = detector
def generate_identifier_start(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_identifier_part(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_start(self):
        r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_part(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_separator_space(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
return self._generate_range(r)
def _generate_range(self, r):
if len(r) == 0:
return '[]'
buf = []
start = r[0]
end = r[0]
predict = start + 1
r = r[1:]
for code in r:
if predict == code:
end = code
predict = code + 1
continue
else:
if start == end:
buf.append("\\u%04X" % start)
elif end == start + 1:
buf.append("\\u%04X\\u%04X" % (start, end))
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
start = code
end = code
predict = code + 1
if start == end:
buf.append("\\u%04X" % start)
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
return '[' + ''.join(buf) + ']'
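    # Worked example: for the codepoints [0x41, 0x42, 0x43, 0x45] the
    # consecutive run 0x41..0x43 collapses into a single range while the
    # isolated 0x45 stays a lone escape, giving '[\u0041-\u0043\u0045]'.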
class Detector(object):
def __init__(self, data):
self.data = data
def is_ascii(self, ch):
return ch < 0x80
def is_ascii_alpha(self, ch):
v = ch | 0x20
return v >= ord('a') and v <= ord('z')
def is_decimal_digit(self, ch):
return ch >= ord('0') and ch <= ord('9')
def is_octal_digit(self, ch):
return ch >= ord('0') and ch <= ord('7')
def is_hex_digit(self, ch):
v = ch | 0x20
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))
def is_digit(self, ch):
return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'
def is_ascii_alphanumeric(self, ch):
return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)
def _is_non_ascii_identifier_start(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'
def _is_non_ascii_identifier_part(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D
def is_separator_space(self, ch):
return self.data[ch] == 'Zs'
def is_white_space(self, ch):
return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)
def is_line_terminator(self, ch):
return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)
def is_line_or_paragraph_terminator(self, ch):
return ch == 0x2028 or ch == 0x2029
def is_identifier_start(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
return self._is_non_ascii_identifier_start(ch)
def is_identifier_part(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
return self._is_non_ascii_identifier_part(ch)
def analyze(source):
data = []
dictionary = {}
with open(source) as uni:
flag = False
first = 0
for line in uni:
d = string.split(line.strip(), ";")
val = int(d[0], 16)
if flag:
if re.compile("<.+, Last>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = False
for t in range(first, val+1):
dictionary[t] = str(d[2])
else:
raise "Database Exception"
else:
if re.compile("<.+, First>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = True
first = val
else:
dictionary[val] = str(d[2])
for i in range(0xFFFF + 1):
        if dictionary.get(i) is None:
data.append("Un")
else:
data.append(dictionary[i])
return RegExpGenerator(Detector(data))
def main(source):
generator = analyze(source)
print generator.generate_non_ascii_identifier_start()
print generator.generate_non_ascii_identifier_part()
print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
main(sys.argv[1])
|
dmwyatt/django-rest-framework
|
refs/heads/master
|
rest_framework/throttling.py
|
2
|
"""
Provides various throttling policies.
"""
from __future__ import unicode_literals
import time
from django.core.cache import cache as default_cache
from django.core.exceptions import ImproperlyConfigured
from rest_framework.settings import api_settings
class BaseThrottle(object):
"""
Rate throttling of requests.
"""
def allow_request(self, request, view):
"""
Return `True` if the request should be allowed, `False` otherwise.
"""
raise NotImplementedError('.allow_request() must be overridden')
def get_ident(self, request):
"""
        Identify the machine making the request by parsing HTTP_X_FORWARDED_FOR
        when it is present and NUM_PROXIES is configured. Otherwise use the
        whole of HTTP_X_FORWARDED_FOR if it is available, falling back to
        REMOTE_ADDR.
"""
xff = request.META.get('HTTP_X_FORWARDED_FOR')
remote_addr = request.META.get('REMOTE_ADDR')
num_proxies = api_settings.NUM_PROXIES
if num_proxies is not None:
if num_proxies == 0 or xff is None:
return remote_addr
addrs = xff.split(',')
client_addr = addrs[-min(num_proxies, len(addrs))]
return client_addr.strip()
return ''.join(xff.split()) if xff else remote_addr
def wait(self):
"""
Optionally, return a recommended number of seconds to wait before
the next request.
"""
return None
class SimpleRateThrottle(BaseThrottle):
"""
    A simple cache implementation that only requires `.get_cache_key()`
    to be overridden.
    The rate (requests / seconds) is set by a `rate` attribute on the throttle
    class. The attribute is a string of the form 'number_of_requests/period'.
Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day')
Previous request information used for throttling is stored in the cache.
"""
cache = default_cache
timer = time.time
cache_format = 'throttle_%(scope)s_%(ident)s'
scope = None
THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES
def __init__(self):
if not getattr(self, 'rate', None):
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
def get_cache_key(self, request, view):
"""
Should return a unique cache-key which can be used for throttling.
Must be overridden.
May return `None` if the request should not be throttled.
"""
raise NotImplementedError('.get_cache_key() must be overridden')
def get_rate(self):
"""
Determine the string representation of the allowed request rate.
"""
if not getattr(self, 'scope', None):
msg = ("You must set either `.scope` or `.rate` for '%s' throttle" %
self.__class__.__name__)
raise ImproperlyConfigured(msg)
try:
return self.THROTTLE_RATES[self.scope]
except KeyError:
msg = "No default throttle rate set for '%s' scope" % self.scope
raise ImproperlyConfigured(msg)
def parse_rate(self, rate):
"""
Given the request rate string, return a two tuple of:
<allowed number of requests>, <period of time in seconds>
"""
if rate is None:
return (None, None)
num, period = rate.split('/')
num_requests = int(num)
duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]
return (num_requests, duration)
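    # For example, parse_rate('100/day') returns (100, 86400) and
    # parse_rate('10/m') returns (10, 60); only the first character of the
    # period token matters, so 'm', 'min' and 'minute' are all equivalent.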
def allow_request(self, request, view):
"""
Implement the check to see if the request should be throttled.
On success calls `throttle_success`.
On failure calls `throttle_failure`.
"""
if self.rate is None:
return True
self.key = self.get_cache_key(request, view)
if self.key is None:
return True
self.history = self.cache.get(self.key, [])
self.now = self.timer()
# Drop any requests from the history which have now passed the
# throttle duration
while self.history and self.history[-1] <= self.now - self.duration:
self.history.pop()
if len(self.history) >= self.num_requests:
return self.throttle_failure()
return self.throttle_success()
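    # Worked example with rate '3/min': given history [now-10, now-30, now-70],
    # the entry older than 60 seconds is dropped first, two requests remain,
    # so the request is allowed and its timestamp is pushed onto the history.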
def throttle_success(self):
"""
Inserts the current request's timestamp along with the key
into the cache.
"""
self.history.insert(0, self.now)
self.cache.set(self.key, self.history, self.duration)
return True
def throttle_failure(self):
"""
Called when a request to the API has failed due to throttling.
"""
return False
def wait(self):
"""
Returns the recommended next request time in seconds.
"""
if self.history:
remaining_duration = self.duration - (self.now - self.history[-1])
else:
remaining_duration = self.duration
available_requests = self.num_requests - len(self.history) + 1
if available_requests <= 0:
return None
return remaining_duration / float(available_requests)
class AnonRateThrottle(SimpleRateThrottle):
"""
    Limits the rate of API calls that may be made by anonymous users.
The IP address of the request will be used as the unique cache key.
"""
scope = 'anon'
def get_cache_key(self, request, view):
if request.user.is_authenticated:
return None # Only throttle unauthenticated requests.
return self.cache_format % {
'scope': self.scope,
'ident': self.get_ident(request)
}
class UserRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls that may be made by a given user.
The user id will be used as a unique cache key if the user is
authenticated. For anonymous requests, the IP address of the request will
be used.
"""
scope = 'user'
def get_cache_key(self, request, view):
if request.user.is_authenticated:
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
class ScopedRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls by different amounts for various parts of
the API. Any view that has the `throttle_scope` property set will be
throttled. The unique cache key will be generated by concatenating the
user id of the request, and the scope of the view being accessed.
"""
scope_attr = 'throttle_scope'
def __init__(self):
# Override the usual SimpleRateThrottle, because we can't determine
# the rate until called by the view.
pass
def allow_request(self, request, view):
# We can only determine the scope once we're called by the view.
self.scope = getattr(view, self.scope_attr, None)
# If a view does not have a `throttle_scope` always allow the request
if not self.scope:
return True
# Determine the allowed request rate as we normally would during
# the `__init__` call.
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
# We can now proceed as normal.
return super(ScopedRateThrottle, self).allow_request(request, view)
def get_cache_key(self, request, view):
"""
If `view.throttle_scope` is not set, don't apply this throttle.
Otherwise generate the unique cache key by concatenating the user id
        with the `.throttle_scope` property of the view.
"""
if request.user.is_authenticated:
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
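# A minimal configuration sketch (the rate values and the view below are
# illustrative):
#
# REST_FRAMEWORK = {
#     'DEFAULT_THROTTLE_CLASSES': (
#         'rest_framework.throttling.AnonRateThrottle',
#         'rest_framework.throttling.UserRateThrottle',
#     ),
#     'DEFAULT_THROTTLE_RATES': {
#         'anon': '100/day',
#         'user': '1000/day',
#         'uploads': '20/hour',
#     },
# }
#
# class UploadView(APIView):
#     throttle_classes = (ScopedRateThrottle,)
#     throttle_scope = 'uploads'  # looked up in DEFAULT_THROTTLE_RATES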
|
zouyapeng/horizon
|
refs/heads/stable/juno
|
openstack_dashboard/dashboards/project/volumes/backups/tests.py
|
6
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_BACKUPS_TAB_URL = reverse('horizon:project:volumes:backups_tab')
class VolumeBackupsViewTests(test.TestCase):
@test.create_stubs({api.cinder: ('volume_backup_create',)})
def test_create_backup_post(self):
volume = self.volumes.first()
backup = self.cinder_volume_backups.first()
api.cinder.volume_backup_create(IsA(http.HttpRequest),
volume.id,
backup.container_name,
backup.name,
backup.description) \
.AndReturn(backup)
self.mox.ReplayAll()
formData = {'method': 'CreateBackupForm',
'tenant_id': self.tenant.id,
'volume_id': volume.id,
'container_name': backup.container_name,
'name': backup.name,
'description': backup.description}
url = reverse('horizon:project:volumes:volumes:create_backup',
args=[volume.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=0)
self.assertRedirectsNoFollow(res, VOLUME_BACKUPS_TAB_URL)
@test.create_stubs({api.cinder: ('volume_list',
'volume_backup_supported',
'volume_backup_list',
'volume_backup_delete')})
def test_delete_volume_backup(self):
vol_backups = self.cinder_volume_backups.list()
volumes = self.cinder_volumes.list()
backup = self.cinder_volume_backups.first()
api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
api.cinder.volume_backup_list(IsA(http.HttpRequest)). \
AndReturn(vol_backups)
api.cinder.volume_list(IsA(http.HttpRequest)). \
AndReturn(volumes)
api.cinder.volume_backup_delete(IsA(http.HttpRequest), backup.id)
api.cinder.volume_backup_list(IsA(http.HttpRequest)). \
AndReturn(vol_backups)
api.cinder.volume_list(IsA(http.HttpRequest)). \
AndReturn(volumes)
self.mox.ReplayAll()
formData = {'action':
'volume_backups__delete__%s' % backup.id}
res = self.client.post(INDEX_URL +
"?tab=volumes_and_snapshots__backups_tab",
formData, follow=True)
self.assertIn("Scheduled deletion of Volume Backup: backup1",
[m.message for m in res.context['messages']])
@test.create_stubs({api.cinder: ('volume_backup_get', 'volume_get')})
def test_volume_backup_detail_get(self):
backup = self.cinder_volume_backups.first()
volume = self.cinder_volumes.get(id=backup.volume_id)
api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id). \
AndReturn(backup)
api.cinder.volume_get(IsA(http.HttpRequest), backup.volume_id). \
AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertContains(res,
"<h2>Volume Backup Details: %s</h2>" %
backup.name,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % backup.name, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % backup.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dt>Volume</dt>", 1, 200)
@test.create_stubs({api.cinder: ('volume_backup_get',)})
def test_volume_backup_detail_get_with_exception(self):
# Test to verify redirect if get volume backup fails
backup = self.cinder_volume_backups.first()
api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id).\
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.cinder: ('volume_backup_get', 'volume_get')})
def test_volume_backup_detail_with_missing_volume(self):
# Test to check page still loads even if volume is deleted
backup = self.cinder_volume_backups.first()
api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id). \
AndReturn(backup)
api.cinder.volume_get(IsA(http.HttpRequest), backup.volume_id). \
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertContains(res,
"<h2>Volume Backup Details: %s</h2>" %
backup.name,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % backup.name, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % backup.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dt>Volume</dt>", 0, 200)
@test.create_stubs({api.cinder: ('volume_list',
'volume_backup_restore',)})
def test_restore_backup(self):
backup = self.cinder_volume_backups.first()
volumes = self.cinder_volumes.list()
api.cinder.volume_list(IsA(http.HttpRequest)). \
AndReturn(volumes)
api.cinder.volume_backup_restore(IsA(http.HttpRequest),
backup.id,
backup.volume_id). \
AndReturn(backup)
self.mox.ReplayAll()
formData = {'method': 'RestoreBackupForm',
'backup_id': backup.id,
'backup_name': backup.name,
'volume_id': backup.volume_id}
url = reverse('horizon:project:volumes:backups:restore',
args=[backup.id])
url += '?%s' % urlencode({'backup_name': backup.name,
'volume_id': backup.volume_id})
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
|
blckshrk/Weboob
|
refs/heads/master
|
modules/popolemploi/pages.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BasePage
import dateutil.parser
from .job import PopolemploiJobAdvert
__all__ = ['SearchPage', 'AdvertPage']
class SearchPage(BasePage):
def iter_job_adverts(self):
rows = self.document.getroot().xpath('//table[@class="definition-table ordered"]/tbody/tr')
for row in rows:
advert = self.create_job_advert(row)
if advert:
yield advert
def create_job_advert(self, row):
a = self.parser.select(row, 'td[@headers="offre"]/a', 1, method='xpath')
_id = u'%s' % (a.attrib['href'][-7:])
if _id:
advert = PopolemploiJobAdvert(_id)
advert.contract_type = u'%s' % self.parser.select(row, 'td[@headers="contrat"]', 1, method='xpath').text
advert.title = u'%s' % a.text_content().strip()
society = self.parser.select(row, 'td/div/p/span[@class="company"]', method='xpath')
if society:
advert.society_name = society[0].text
advert.place = u'%s' % self.parser.select(row, 'td[@headers="lieu"]', 1, method='xpath').text_content()
date = self.parser.select(row, 'td[@headers="dateEmission"]', 1, method='xpath')
advert.publication_date = dateutil.parser.parse(date.text).date()
return advert
class AdvertPage(BasePage):
def get_job_advert(self, url, advert):
content = self.document.getroot().xpath('//div[@class="block-content"]/div')[0]
if not advert:
_id = self.parser.select(content, 'ul/li/ul/li/div[@class="value"]/span', 1, method='xpath').text
advert = PopolemploiJobAdvert(_id)
advert.title = u'%s' % self.parser.select(content, 'h4', 1, method='xpath').text.strip()
advert.job_name = u'%s' % self.parser.select(content, 'h4', 1, method='xpath').text.strip()
advert.description = u'%s' % self.parser.select(content, 'p[@itemprop="description"]', 1, method='xpath').text
society_name = self.parser.select(content, 'div[@class="vcard"]/p[@class="title"]/span', method='xpath')
if society_name:
advert.society_name = u'%s' % society_name[0].text
advert.url = url
        # Guard on the raw .text values: formatting first would turn a missing
        # node text (None) into the truthy string u'None'.
        place = self.parser.select(content,
                                   'ul/li/div[@class="value"]/ul/li[@itemprop="addressRegion"]',
                                   1, method='xpath').text
        if place:
            advert.place = u'%s' % place.strip()
        contract_type = self.parser.select(content,
                                           'ul/li/div[@class="value"]/span[@itemprop="employmentType"]',
                                           1, method='xpath').text
        if contract_type:
            advert.contract_type = u'%s' % contract_type.strip()
        experience = self.parser.select(content,
                                        'ul/li/div[@class="value"]/span[@itemprop="experienceRequirements"]',
                                        1, method='xpath').text
        if experience:
            advert.experience = u'%s' % experience.strip()
        formation = self.parser.select(content,
                                       'ul/li/div[@class="value"]/span[@itemprop="qualifications"]',
                                       1, method='xpath').text
        if formation:
            advert.formation = u'%s' % formation.strip()
        pay = self.parser.select(content,
                                 'ul/li/div[@class="value"]/span[@itemprop="baseSalary"]',
                                 1, method='xpath').text
        if pay:
            advert.pay = u'%s' % pay.strip()
return advert
|
DDelon/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/traileraddict.py
|
146
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TrailerAddictIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)'
_TEST = {
'url': 'http://www.traileraddict.com/trailer/prince-avalanche/trailer',
'md5': '41365557f3c8c397d091da510e73ceb4',
'info_dict': {
'id': '76184',
'ext': 'mp4',
'title': 'Prince Avalanche Trailer',
'description': 'Trailer for Prince Avalanche.\n\nTwo highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind.',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('movie') + '/' + mobj.group('trailer_name')
webpage = self._download_webpage(url, name)
title = self._search_regex(r'<title>(.+?)</title>',
webpage, 'video title').replace(' - Trailer Addict', '')
view_count_str = self._search_regex(
r'<span class="views_n">([0-9,.]+)</span>',
webpage, 'view count', fatal=False)
view_count = (
None if view_count_str is None
else int(view_count_str.replace(',', '')))
video_id = self._search_regex(
r'<param\s+name="movie"\s+value="/emb/([0-9]+)"\s*/>',
webpage, 'video id')
# Presence of (no)watchplus function indicates HD quality is available
        if re.search(r'function (no)?watchplus\(\)', webpage):
fvar = "fvarhd"
else:
fvar = "fvar"
info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
final_url = self._search_regex(r'&fileurl=(.+)',
info_webpage, 'Download url').replace('%3F', '?')
thumbnail_url = self._search_regex(r'&image=(.+?)&',
info_webpage, 'thumbnail url')
description = self._html_search_regex(
r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>',
webpage, 'description', fatal=False)
return {
'id': video_id,
'url': final_url,
'title': title,
'thumbnail': thumbnail_url,
'description': description,
'view_count': view_count,
}
|
ustramooner/CouchPotato
|
refs/heads/NzbIndexCom
|
library/sqlalchemy/orm/unitofwork.py
|
11
|
# orm/unitofwork.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from sqlalchemy import util, topological
from sqlalchemy.orm import attributes, interfaces
from sqlalchemy.orm import util as mapperutil
from sqlalchemy.orm.util import _state_mapper
# Load lazily
_state_session = None
class UOWEventHandler(interfaces.AttributeExtension):
"""An event handler added to all relationship attributes which handles
session cascade operations.
"""
active_history = False
def __init__(self, key):
self.key = key
def append(self, state, item, initiator):
# process "save_update" cascade rules for when
# an instance is appended to the list of another instance
sess = _state_session(state)
if sess:
prop = _state_mapper(state).get_property(self.key)
if prop.cascade.save_update and \
(prop.cascade_backrefs or self.key == initiator.key) and \
item not in sess:
sess.add(item)
return item
def remove(self, state, item, initiator):
sess = _state_session(state)
if sess:
prop = _state_mapper(state).get_property(self.key)
# expunge pending orphans
if prop.cascade.delete_orphan and \
item in sess.new and \
prop.mapper._is_orphan(attributes.instance_state(item)):
sess.expunge(item)
def set(self, state, newvalue, oldvalue, initiator):
# process "save_update" cascade rules for when an instance
# is attached to another instance
if oldvalue is newvalue:
return newvalue
sess = _state_session(state)
if sess:
prop = _state_mapper(state).get_property(self.key)
if newvalue is not None and \
prop.cascade.save_update and \
(prop.cascade_backrefs or self.key == initiator.key) and \
newvalue not in sess:
sess.add(newvalue)
if prop.cascade.delete_orphan and \
oldvalue in sess.new and \
prop.mapper._is_orphan(attributes.instance_state(oldvalue)):
sess.expunge(oldvalue)
return newvalue
class UOWTransaction(object):
def __init__(self, session):
self.session = session
self.mapper_flush_opts = session._mapper_flush_opts
# dictionary used by external actors to
# store arbitrary state information.
self.attributes = {}
# dictionary of mappers to sets of
# DependencyProcessors, which are also
# set to be part of the sorted flush actions,
# which have that mapper as a parent.
self.deps = util.defaultdict(set)
# dictionary of mappers to sets of InstanceState
# items pending for flush which have that mapper
# as a parent.
self.mappers = util.defaultdict(set)
# a dictionary of Preprocess objects, which gather
# additional states impacted by the flush
# and determine if a flush action is needed
self.presort_actions = {}
# dictionary of PostSortRec objects, each
# one issues work during the flush within
# a certain ordering.
self.postsort_actions = {}
# a set of 2-tuples, each containing two
# PostSortRec objects where the second
# is dependent on the first being executed
# first
self.dependencies = set()
# dictionary of InstanceState-> (isdelete, listonly)
# tuples, indicating if this state is to be deleted
# or insert/updated, or just refreshed
self.states = {}
# tracks InstanceStates which will be receiving
# a "post update" call. Keys are mappers,
# values are a set of states and a set of the
# columns which should be included in the update.
self.post_update_states = util.defaultdict(lambda: (set(), set()))
@property
def has_work(self):
return bool(self.states)
def is_deleted(self, state):
"""return true if the given state is marked as deleted
within this uowtransaction."""
return state in self.states and self.states[state][0]
def memo(self, key, callable_):
if key in self.attributes:
return self.attributes[key]
else:
self.attributes[key] = ret = callable_()
return ret
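    # e.g. uow.memo(('mykey',), expensive_fn) calls expensive_fn only on the
    # first lookup; later calls with the same key return the value cached in
    # self.attributes.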
def remove_state_actions(self, state):
"""remove pending actions for a state from the uowtransaction."""
isdelete = self.states[state][0]
self.states[state] = (isdelete, True)
def get_attribute_history(self, state, key, passive=True):
"""facade to attributes.get_state_history(), including caching of results."""
hashkey = ("history", state, key)
# cache the objects, not the states; the strong reference here
# prevents newly loaded objects from being dereferenced during the
# flush process
if hashkey in self.attributes:
(history, cached_passive) = self.attributes[hashkey]
# if the cached lookup was "passive" and now we want non-passive, do a non-passive
# lookup and re-cache
if cached_passive and not passive:
history = state.get_history(key, passive=False)
self.attributes[hashkey] = (history, passive)
else:
history = state.get_history(key, passive=passive)
self.attributes[hashkey] = (history, passive)
if not history or not state.get_impl(key).uses_objects:
return history
else:
return history.as_state()
def has_dep(self, processor):
return (processor, True) in self.presort_actions
def register_preprocessor(self, processor, fromparent):
key = (processor, fromparent)
if key not in self.presort_actions:
self.presort_actions[key] = Preprocess(processor, fromparent)
def register_object(self, state, isdelete=False,
listonly=False, cancel_delete=False):
if not self.session._contains_state(state):
return
if state not in self.states:
mapper = _state_mapper(state)
if mapper not in self.mappers:
mapper._per_mapper_flush_actions(self)
self.mappers[mapper].add(state)
self.states[state] = (isdelete, listonly)
else:
if not listonly and (isdelete or cancel_delete):
self.states[state] = (isdelete, False)
def issue_post_update(self, state, post_update_cols):
mapper = state.manager.mapper.base_mapper
states, cols = self.post_update_states[mapper]
states.add(state)
cols.update(post_update_cols)
@util.memoized_property
def _mapper_for_dep(self):
"""return a dynamic mapping of (Mapper, DependencyProcessor) to
True or False, indicating if the DependencyProcessor operates
on objects of that Mapper.
The result is stored in the dictionary persistently once
calculated.
"""
return util.PopulateDict(
lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop
)
def filter_states_for_dep(self, dep, states):
"""Filter the given list of InstanceStates to those relevant to the
given DependencyProcessor.
"""
mapper_for_dep = self._mapper_for_dep
return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]
def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
checktup = (isdelete, listonly)
for mapper in mapper.base_mapper.self_and_descendants:
for state in self.mappers[mapper]:
if self.states[state] == checktup:
yield state
def _generate_actions(self):
"""Generate the full, unsorted collection of PostSortRecs as
well as dependency pairs for this UOWTransaction.
"""
# execute presort_actions, until all states
# have been processed. a presort_action might
# add new states to the uow.
while True:
ret = False
for action in list(self.presort_actions.values()):
if action.execute(self):
ret = True
if not ret:
break
# see if the graph of mapper dependencies has cycles.
self.cycles = cycles = topological.find_cycles(
self.dependencies,
self.postsort_actions.values())
if cycles:
# if yes, break the per-mapper actions into
# per-state actions
convert = dict(
(rec, set(rec.per_state_flush_actions(self)))
for rec in cycles
)
# rewrite the existing dependencies to point to
# the per-state actions for those per-mapper actions
# that were broken up.
for edge in list(self.dependencies):
if None in edge or \
edge[0].disabled or edge[1].disabled or \
cycles.issuperset(edge):
self.dependencies.remove(edge)
elif edge[0] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[0]]:
self.dependencies.add((dep, edge[1]))
elif edge[1] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[1]]:
self.dependencies.add((edge[0], dep))
        return set(
            [a for a in self.postsort_actions.values() if not a.disabled]
        ).difference(cycles)
def execute(self):
postsort_actions = self._generate_actions()
#sort = topological.sort(self.dependencies, postsort_actions)
#print "--------------"
#print self.dependencies
#print list(sort)
#print "COUNT OF POSTSORT ACTIONS", len(postsort_actions)
# execute
if self.cycles:
for set_ in topological.sort_as_subsets(
self.dependencies,
postsort_actions):
while set_:
n = set_.pop()
n.execute_aggregate(self, set_)
else:
for rec in topological.sort(
self.dependencies,
postsort_actions):
rec.execute(self)
def finalize_flush_changes(self):
"""mark processed objects as clean / deleted after a successful flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
"""
for state, (isdelete, listonly) in self.states.iteritems():
if isdelete:
self.session._remove_newly_deleted(state)
else:
# if listonly:
# debug... would like to see how many do this
self.session._register_newly_persistent(state)
class IterateMappersMixin(object):
def _mappers(self, uow):
if self.fromparent:
return iter(
m for m in self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
return self.dependency_processor.mapper.self_and_descendants
class Preprocess(IterateMappersMixin):
def __init__(self, dependency_processor, fromparent):
self.dependency_processor = dependency_processor
self.fromparent = fromparent
self.processed = set()
self.setup_flush_actions = False
def execute(self, uow):
delete_states = set()
save_states = set()
for mapper in self._mappers(uow):
for state in uow.mappers[mapper].difference(self.processed):
(isdelete, listonly) = uow.states[state]
if not listonly:
if isdelete:
delete_states.add(state)
else:
save_states.add(state)
if delete_states:
self.dependency_processor.presort_deletes(uow, delete_states)
self.processed.update(delete_states)
if save_states:
self.dependency_processor.presort_saves(uow, save_states)
self.processed.update(save_states)
if (delete_states or save_states):
if not self.setup_flush_actions and (
self.dependency_processor.\
prop_has_changes(uow, delete_states, True) or
self.dependency_processor.\
prop_has_changes(uow, save_states, False)
):
self.dependency_processor.per_property_flush_actions(uow)
self.setup_flush_actions = True
return True
else:
return False
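# Preprocess.execute() returning True whenever it touched new states is
# what drives the fixed-point loop in _generate_actions() above: presort
# handlers may pull additional (e.g. cascaded) states into the
# transaction, and the loop re-runs every handler until a full pass
# produces no new work.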
class PostSortRec(object):
disabled = False
def __new__(cls, uow, *args):
key = (cls, ) + args
if key in uow.postsort_actions:
return uow.postsort_actions[key]
else:
uow.postsort_actions[key] = \
ret = \
object.__new__(cls)
return ret
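# Sketch of the interning pattern implemented by __new__ above
# (illustrative, standalone; the real code scopes the registry to the
# UOWTransaction rather than the class): constructing a record with an
# already-seen key returns the existing instance, so each logical action
# exists exactly once per unit of work. Note that __init__ still runs on
# every construction, re-assigning the same attribute values.
class _InternedExample(object):
    _registry = {}
    def __new__(cls, *args):
        key = (cls,) + args
        if key not in cls._registry:
            cls._registry[key] = object.__new__(cls)
        return cls._registry[key]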
def execute_aggregate(self, uow, recs):
self.execute(uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
",".join(str(x) for x in self.__dict__.values())
)
class ProcessAll(IterateMappersMixin, PostSortRec):
def __init__(self, uow, dependency_processor, delete, fromparent):
self.dependency_processor = dependency_processor
self.delete = delete
self.fromparent = fromparent
uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor)
def execute(self, uow):
states = self._elements(uow)
if self.delete:
self.dependency_processor.process_deletes(uow, states)
else:
self.dependency_processor.process_saves(uow, states)
def per_state_flush_actions(self, uow):
# this is handled by SaveUpdateAll and DeleteAll,
# since a ProcessAll should unconditionally be pulled
# into per-state actions if either the parent or child
# mapper is part of a cycle
return iter([])
def __repr__(self):
return "%s(%s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
self.delete
)
def _elements(self, uow):
for mapper in self._mappers(uow):
for state in uow.mappers[mapper]:
(isdelete, listonly) = uow.states[state]
if isdelete == self.delete and not listonly:
yield state
class IssuePostUpdate(PostSortRec):
def __init__(self, uow, mapper, isdelete):
self.mapper = mapper
self.isdelete = isdelete
def execute(self, uow):
states, cols = uow.post_update_states[self.mapper]
states = [s for s in states if uow.states[s][0] == self.isdelete]
self.mapper._post_update(states, uow, cols)
class SaveUpdateAll(PostSortRec):
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
self.mapper._save_obj(
uow.states_for_mapper_hierarchy(self.mapper, False, False),
uow
)
def per_state_flush_actions(self, uow):
states = list(uow.states_for_mapper_hierarchy(self.mapper, False, False))
for rec in self.mapper._per_state_flush_actions(
uow,
states,
False):
yield rec
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, False)
class DeleteAll(PostSortRec):
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
self.mapper._delete_obj(
uow.states_for_mapper_hierarchy(self.mapper, True, False),
uow
)
def per_state_flush_actions(self, uow):
states = list(uow.states_for_mapper_hierarchy(self.mapper, True, False))
for rec in self.mapper._per_state_flush_actions(
uow,
states,
True):
yield rec
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, True)
class ProcessState(PostSortRec):
def __init__(self, uow, dependency_processor, delete, state):
self.dependency_processor = dependency_processor
self.delete = delete
self.state = state
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
dependency_processor = self.dependency_processor
delete = self.delete
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.dependency_processor is dependency_processor and
r.delete is delete]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
if delete:
dependency_processor.process_deletes(uow, states)
else:
dependency_processor.process_saves(uow, states)
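# The batching idiom above: the first record of a given
# (class, dependency_processor, delete) combination to execute removes
# all matching sibling records from the pending set and processes their
# states in a single call, turning N per-state records into one
# process_saves() / process_deletes() pass.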
def __repr__(self):
return "%s(%s, %s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
mapperutil.state_str(self.state),
self.delete
)
class SaveUpdateState(PostSortRec):
def __init__(self, uow, state, mapper):
self.state = state
self.mapper = mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
mapper._save_obj(
[self.state] +
[r.state for r in our_recs],
uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
mapperutil.state_str(self.state)
)
class DeleteState(PostSortRec):
def __init__(self, uow, state, mapper):
self.state = state
self.mapper = mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
mapper._delete_obj(
[s for s in states if uow.states[s][0]],
uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
mapperutil.state_str(self.state)
)
|
mrrusof/snippets
|
refs/heads/master
|
python/utils.py
|
1
|
import sys
def failwith(msg):
    # Print an error message and terminate with exit status 1.
    print 'FAILURE:', msg
    sys.exit(1)
def warn(msg):
    # Print a non-fatal warning message and continue.
    print 'WARNING:', msg
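# Illustrative usage:
#   warn('cache directory missing; falling back to /tmp')
#   failwith('required config file not found')  # exits with status 1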
|
apark263/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/padding_fifo_queue_test.py
|
8
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
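# Hedged usage sketch (TF1 graph mode, mirroring the tests below): the
# defining feature of PaddingFIFOQueue is that components declared with
# partially-known shapes are zero-padded to a common shape on batched
# dequeue. Illustrative values, not part of this test suite:
#
#   q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((None,),))
#   q.enqueue(([1],)).run()
#   q.enqueue(([2, 3],)).run()
#   q.dequeue_many(2).eval()  # -> [[1, 0], [2, 3]]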
@test_util.run_v1_only("PaddingFIFOQueue removed from v2")
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.cached_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
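# Semantics exercised above: on a closed queue, dequeue_many raises
# OutOfRangeError if fewer than the requested number of elements remain,
# while dequeue_up_to returns the remainder (here elems[3:]) and only
# raises once the queue is empty.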
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
@test_util.run_deprecated_v1
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
test.main()
|
kuipertan/vitess
|
refs/heads/master
|
py/vtproto/automation_pb2.py
|
8
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: automation.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='automation.proto',
package='automation',
syntax='proto3',
serialized_pb=b'\n\x10\x61utomation.proto\x12\nautomation\"\x90\x01\n\x10\x43lusterOperation\x12\n\n\x02id\x18\x01 \x01(\t\x12/\n\x0cserial_tasks\x18\x02 \x03(\x0b\x32\x19.automation.TaskContainer\x12\x30\n\x05state\x18\x03 \x01(\x0e\x32!.automation.ClusterOperationState\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"N\n\rTaskContainer\x12(\n\x0eparallel_tasks\x18\x01 \x03(\x0b\x32\x10.automation.Task\x12\x13\n\x0b\x63oncurrency\x18\x02 \x01(\x05\"\xce\x01\n\x04Task\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\nparameters\x18\x02 \x03(\x0b\x32 .automation.Task.ParametersEntry\x12\n\n\x02id\x18\x03 \x01(\t\x12$\n\x05state\x18\x04 \x01(\x0e\x32\x15.automation.TaskState\x12\x0e\n\x06output\x18\x05 \x01(\t\x12\r\n\x05\x65rror\x18\x06 \x01(\t\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xb1\x01\n\x1e\x45nqueueClusterOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12N\n\nparameters\x18\x02 \x03(\x0b\x32:.automation.EnqueueClusterOperationRequest.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"-\n\x1f\x45nqueueClusterOperationResponse\x12\n\n\x02id\x18\x01 \x01(\t\"-\n\x1fGetClusterOperationStateRequest\x12\n\n\x02id\x18\x01 \x01(\t\"T\n GetClusterOperationStateResponse\x12\x30\n\x05state\x18\x01 \x01(\x0e\x32!.automation.ClusterOperationState\"/\n!GetClusterOperationDetailsRequest\x12\n\n\x02id\x18\x01 \x01(\t\"V\n\"GetClusterOperationDetailsResponse\x12\x30\n\ncluster_op\x18\x02 \x01(\x0b\x32\x1c.automation.ClusterOperation*\x9a\x01\n\x15\x43lusterOperationState\x12#\n\x1fUNKNOWN_CLUSTER_OPERATION_STATE\x10\x00\x12!\n\x1d\x43LUSTER_OPERATION_NOT_STARTED\x10\x01\x12\x1d\n\x19\x43LUSTER_OPERATION_RUNNING\x10\x02\x12\x1a\n\x16\x43LUSTER_OPERATION_DONE\x10\x03*K\n\tTaskState\x12\x16\n\x12UNKNOWN_TASK_STATE\x10\x00\x12\x0f\n\x0bNOT_STARTED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CLUSTEROPERATIONSTATE = _descriptor.EnumDescriptor(
name='ClusterOperationState',
full_name='automation.ClusterOperationState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_CLUSTER_OPERATION_STATE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_OPERATION_NOT_STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_OPERATION_RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_OPERATION_DONE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=966,
serialized_end=1120,
)
_sym_db.RegisterEnumDescriptor(_CLUSTEROPERATIONSTATE)
ClusterOperationState = enum_type_wrapper.EnumTypeWrapper(_CLUSTEROPERATIONSTATE)
_TASKSTATE = _descriptor.EnumDescriptor(
name='TaskState',
full_name='automation.TaskState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_TASK_STATE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DONE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1122,
serialized_end=1197,
)
_sym_db.RegisterEnumDescriptor(_TASKSTATE)
TaskState = enum_type_wrapper.EnumTypeWrapper(_TASKSTATE)
UNKNOWN_CLUSTER_OPERATION_STATE = 0
CLUSTER_OPERATION_NOT_STARTED = 1
CLUSTER_OPERATION_RUNNING = 2
CLUSTER_OPERATION_DONE = 3
UNKNOWN_TASK_STATE = 0
NOT_STARTED = 1
RUNNING = 2
DONE = 3
_CLUSTEROPERATION = _descriptor.Descriptor(
name='ClusterOperation',
full_name='automation.ClusterOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='automation.ClusterOperation.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serial_tasks', full_name='automation.ClusterOperation.serial_tasks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='automation.ClusterOperation.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='automation.ClusterOperation.error', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=177,
)
_TASKCONTAINER = _descriptor.Descriptor(
name='TaskContainer',
full_name='automation.TaskContainer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parallel_tasks', full_name='automation.TaskContainer.parallel_tasks', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='concurrency', full_name='automation.TaskContainer.concurrency', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=179,
serialized_end=257,
)
_TASK_PARAMETERSENTRY = _descriptor.Descriptor(
name='ParametersEntry',
full_name='automation.Task.ParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='automation.Task.ParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='automation.Task.ParametersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=417,
serialized_end=466,
)
_TASK = _descriptor.Descriptor(
name='Task',
full_name='automation.Task',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='automation.Task.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='automation.Task.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='automation.Task.id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='automation.Task.state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output', full_name='automation.Task.output', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='automation.Task.error', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TASK_PARAMETERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=260,
serialized_end=466,
)
_ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY = _descriptor.Descriptor(
name='ParametersEntry',
full_name='automation.EnqueueClusterOperationRequest.ParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='automation.EnqueueClusterOperationRequest.ParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='automation.EnqueueClusterOperationRequest.ParametersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=417,
serialized_end=466,
)
_ENQUEUECLUSTEROPERATIONREQUEST = _descriptor.Descriptor(
name='EnqueueClusterOperationRequest',
full_name='automation.EnqueueClusterOperationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='automation.EnqueueClusterOperationRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='automation.EnqueueClusterOperationRequest.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=469,
serialized_end=646,
)
_ENQUEUECLUSTEROPERATIONRESPONSE = _descriptor.Descriptor(
name='EnqueueClusterOperationResponse',
full_name='automation.EnqueueClusterOperationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='automation.EnqueueClusterOperationResponse.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=648,
serialized_end=693,
)
_GETCLUSTEROPERATIONSTATEREQUEST = _descriptor.Descriptor(
name='GetClusterOperationStateRequest',
full_name='automation.GetClusterOperationStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='automation.GetClusterOperationStateRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=695,
serialized_end=740,
)
_GETCLUSTEROPERATIONSTATERESPONSE = _descriptor.Descriptor(
name='GetClusterOperationStateResponse',
full_name='automation.GetClusterOperationStateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='automation.GetClusterOperationStateResponse.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=742,
serialized_end=826,
)
_GETCLUSTEROPERATIONDETAILSREQUEST = _descriptor.Descriptor(
name='GetClusterOperationDetailsRequest',
full_name='automation.GetClusterOperationDetailsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='automation.GetClusterOperationDetailsRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=828,
serialized_end=875,
)
_GETCLUSTEROPERATIONDETAILSRESPONSE = _descriptor.Descriptor(
name='GetClusterOperationDetailsResponse',
full_name='automation.GetClusterOperationDetailsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster_op', full_name='automation.GetClusterOperationDetailsResponse.cluster_op', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=877,
serialized_end=963,
)
_CLUSTEROPERATION.fields_by_name['serial_tasks'].message_type = _TASKCONTAINER
_CLUSTEROPERATION.fields_by_name['state'].enum_type = _CLUSTEROPERATIONSTATE
_TASKCONTAINER.fields_by_name['parallel_tasks'].message_type = _TASK
_TASK_PARAMETERSENTRY.containing_type = _TASK
_TASK.fields_by_name['parameters'].message_type = _TASK_PARAMETERSENTRY
_TASK.fields_by_name['state'].enum_type = _TASKSTATE
_ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY.containing_type = _ENQUEUECLUSTEROPERATIONREQUEST
_ENQUEUECLUSTEROPERATIONREQUEST.fields_by_name['parameters'].message_type = _ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY
_GETCLUSTEROPERATIONSTATERESPONSE.fields_by_name['state'].enum_type = _CLUSTEROPERATIONSTATE
_GETCLUSTEROPERATIONDETAILSRESPONSE.fields_by_name['cluster_op'].message_type = _CLUSTEROPERATION
DESCRIPTOR.message_types_by_name['ClusterOperation'] = _CLUSTEROPERATION
DESCRIPTOR.message_types_by_name['TaskContainer'] = _TASKCONTAINER
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['EnqueueClusterOperationRequest'] = _ENQUEUECLUSTEROPERATIONREQUEST
DESCRIPTOR.message_types_by_name['EnqueueClusterOperationResponse'] = _ENQUEUECLUSTEROPERATIONRESPONSE
DESCRIPTOR.message_types_by_name['GetClusterOperationStateRequest'] = _GETCLUSTEROPERATIONSTATEREQUEST
DESCRIPTOR.message_types_by_name['GetClusterOperationStateResponse'] = _GETCLUSTEROPERATIONSTATERESPONSE
DESCRIPTOR.message_types_by_name['GetClusterOperationDetailsRequest'] = _GETCLUSTEROPERATIONDETAILSREQUEST
DESCRIPTOR.message_types_by_name['GetClusterOperationDetailsResponse'] = _GETCLUSTEROPERATIONDETAILSRESPONSE
DESCRIPTOR.enum_types_by_name['ClusterOperationState'] = _CLUSTEROPERATIONSTATE
DESCRIPTOR.enum_types_by_name['TaskState'] = _TASKSTATE
ClusterOperation = _reflection.GeneratedProtocolMessageType('ClusterOperation', (_message.Message,), dict(
DESCRIPTOR = _CLUSTEROPERATION,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.ClusterOperation)
))
_sym_db.RegisterMessage(ClusterOperation)
TaskContainer = _reflection.GeneratedProtocolMessageType('TaskContainer', (_message.Message,), dict(
DESCRIPTOR = _TASKCONTAINER,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.TaskContainer)
))
_sym_db.RegisterMessage(TaskContainer)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), dict(
ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _TASK_PARAMETERSENTRY,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.Task.ParametersEntry)
))
,
DESCRIPTOR = _TASK,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.Task)
))
_sym_db.RegisterMessage(Task)
_sym_db.RegisterMessage(Task.ParametersEntry)
EnqueueClusterOperationRequest = _reflection.GeneratedProtocolMessageType('EnqueueClusterOperationRequest', (_message.Message,), dict(
ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.EnqueueClusterOperationRequest.ParametersEntry)
))
,
DESCRIPTOR = _ENQUEUECLUSTEROPERATIONREQUEST,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.EnqueueClusterOperationRequest)
))
_sym_db.RegisterMessage(EnqueueClusterOperationRequest)
_sym_db.RegisterMessage(EnqueueClusterOperationRequest.ParametersEntry)
EnqueueClusterOperationResponse = _reflection.GeneratedProtocolMessageType('EnqueueClusterOperationResponse', (_message.Message,), dict(
DESCRIPTOR = _ENQUEUECLUSTEROPERATIONRESPONSE,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.EnqueueClusterOperationResponse)
))
_sym_db.RegisterMessage(EnqueueClusterOperationResponse)
GetClusterOperationStateRequest = _reflection.GeneratedProtocolMessageType('GetClusterOperationStateRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTEROPERATIONSTATEREQUEST,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.GetClusterOperationStateRequest)
))
_sym_db.RegisterMessage(GetClusterOperationStateRequest)
GetClusterOperationStateResponse = _reflection.GeneratedProtocolMessageType('GetClusterOperationStateResponse', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTEROPERATIONSTATERESPONSE,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.GetClusterOperationStateResponse)
))
_sym_db.RegisterMessage(GetClusterOperationStateResponse)
GetClusterOperationDetailsRequest = _reflection.GeneratedProtocolMessageType('GetClusterOperationDetailsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTEROPERATIONDETAILSREQUEST,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.GetClusterOperationDetailsRequest)
))
_sym_db.RegisterMessage(GetClusterOperationDetailsRequest)
GetClusterOperationDetailsResponse = _reflection.GeneratedProtocolMessageType('GetClusterOperationDetailsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTEROPERATIONDETAILSRESPONSE,
__module__ = 'automation_pb2'
# @@protoc_insertion_point(class_scope:automation.GetClusterOperationDetailsResponse)
))
_sym_db.RegisterMessage(GetClusterOperationDetailsResponse)
_TASK_PARAMETERSENTRY.has_options = True
_TASK_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
_ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY.has_options = True
_ENQUEUECLUSTEROPERATIONREQUEST_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.early_adopter import implementations as early_adopter_implementations
from grpc.framework.alpha import utilities as alpha_utilities
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
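# Illustrative round-trip with the generated messages (a minimal sketch, not
# part of the protoc output; it assumes only the standard protobuf Python
# message API: keyword construction, map-field assignment, SerializeToString
# and FromString).
if __name__ == '__main__':
    task = Task(name='Ping', id='task-1', state=NOT_STARTED)
    task.parameters['hostname'] = 'db-0001'  # map<string, string> field
    container = TaskContainer(concurrency=1, parallel_tasks=[task])
    op = ClusterOperation(id='op-1', serial_tasks=[container],
                          state=CLUSTER_OPERATION_NOT_STARTED)
    data = op.SerializeToString()
    assert ClusterOperation.FromString(data) == op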
|
JeyZeta/Dangerous
|
refs/heads/master
|
Dangerous/Weevely/core/argparse.py
|
1
|
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.2.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
set
except NameError:
# for python < 2.4 compatibility (sets module is there since 2.3):
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
sorted
except NameError:
# for python < 2.4 compatibility:
def sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
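# Illustrative repr produced by _AttributeHolder subclasses such as the
# Namespace class defined below (kwargs are sorted alphabetically):
#   >>> Namespace(foo=1, bar='x')
#   Namespace(bar='x', foo=1)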
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
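# Illustrative formatter selection (shown as a comment only, since
# ArgumentParser is defined further below):
#   parser = ArgumentParser(
#       description='pre-wrapped\n  description text',
#       formatter_class=RawDescriptionHelpFormatter)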
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
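# Illustrative type= callable (a hypothetical helper, not part of the
# original library): passing type=_example_port to add_argument() would
# convert the raw string and report bad values as clean parser errors.
def _example_port(string):
    value = int(string)  # ValueError here is also reported by the parser
    if not 0 < value < 65536:
        raise ArgumentTypeError('%r is not a valid port number' % string)
    return value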
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
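# Illustrative Action subclass (a hypothetical example, unused by the
# library itself): a concrete action only needs to implement __call__.
class _ExampleUpperAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # store the converted string value upper-cased on the namespace
        setattr(namespace, self.dest, values.upper())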
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
type=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
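# The pair above backs the common flag pattern (illustrative):
#   parser.add_argument('--verbose', action='store_true')  # False -> True
#   parser.add_argument('--no-color', action='store_false', dest='color')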
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
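# e.g. parser.add_argument('-v', action='count', default=0) makes "-vvv"
# yield namespace.v == 3 via the _ensure_value(...) + 1 increment above.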
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
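# Illustrative subcommand wiring (a minimal sketch; add_subparsers() on
# ArgumentParser registers this action):
#   subparsers = parser.add_subparsers(dest='command')
#   fetch = subparsers.add_parser('fetch', help='fetch a resource')
#   fetch.add_argument('url')
#   args = parser.parse_args(['fetch', 'http://example.com'])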
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
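# e.g. parser.add_argument('outfile', type=FileType('w')) opens the named
# file for writing, with "-" mapped to sys.stdout as handled in __call__.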
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
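# Namespace compares by attribute values and supports membership tests:
#   Namespace(a=1) == Namespace(a=1)  ->  True
#   'a' in Namespace(a=1)             ->  True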
class StoredNamespace(Namespace):
stored = True
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
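# e.g. container.set_defaults(mode='fast') both records the default and
# backfills any already-added action whose dest is 'mode', so
# get_default('mode') returns 'fast' either way.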
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
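# Both documented signatures, concretely (illustrative):
#   container.add_argument('filename')                     # positional
#   container.add_argument('-o', '--output', dest='out')   # optional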
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
    - add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if '-' in prefix_chars:
default_prefix = '-'
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
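        # worked example (assuming '-x' is a registered optional): the list
        # ['-x', '1', '--', 'a', 'b'] yields the pattern 'OA-AA'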
        # converts arg strings to the appropriate type and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
            if not hasattr(namespace, 'stored') or \
                    getattr(namespace, 'stored') == False:
                self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
                    if not hasattr(namespace, 'stored'):
                        self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
                    if not hasattr(namespace, 'stored'):
                        self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
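    # Worked example: nargs=2 yields the pattern '(-*A-*A-*)', i.e. exactly
    # two 'A's with optional '--' markers (shown as '-') interleaved; for an
    # optional action the '-*' parts are stripped, leaving '(AA)'.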
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
if not arg_string:
raise ValueError("Empty value")
elif type_func.__name__ != 'identity':
result = type_func(literal_eval(arg_string))
else:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
#self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
pass
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage()
raise ModuleException(self.prog[1:], message + '. Run \':help %s\' for help.' % self.prog[1:])
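# A hedged usage sketch (module name hypothetical): because exit() is a no-op
# and error() raises ModuleException, a caller can trap a bad command line
# instead of having the process terminate:
#
#     parser = ArgumentParser(prog=':audit')
#     try:
#         parser.parse_args(['-no-such-option'])
#     except ModuleException as e:
#         print(e)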
|
kalvdans/scipy
|
refs/heads/master
|
scipy/special/tests/test_gammainc.py
|
48
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.special import gammainc
from scipy.special._testutils import FuncData
def test_line():
# Test on the line a = x where a simpler asymptotic expansion
# (analog of DLMF 8.12.15) is available.
def gammainc_line(x):
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
-3184811/3695155200, -2745493/8151736420])
res = 0
xfac = 1
for ck in c:
res -= ck*xfac
xfac /= x
res /= np.sqrt(2*np.pi*x)
res += 0.5
return res
x = np.logspace(np.log10(25), 300, 500)
a = x.copy()
dataset = np.vstack((a, x, gammainc_line(x))).T
FuncData(gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
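# A minimal sketch (an assumption, not part of the upstream suite): running
# the module directly exercises the a == x check without a test runner.
if __name__ == '__main__':
    test_line()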
|
Rona111/website
|
refs/heads/8.0
|
website_event_register_free_with_sale/__openerp__.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Register for free events - Sale extension",
"version": "1.0",
"author": "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Antiun Ingeniería S.L.,",
"Odoo Community Association (OCA)"
"license": "AGPL-3",
"category": "Website",
"summary": "Combine free and paid tickets on events",
"depends": [
'website_event_register_free',
'website_event_sale',
],
"auto_install": True,
"installable": True,
}
|
ahmedaljazzar/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/services.py
|
17
|
"""
Certificate service
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from lms.djangoapps.utils import _get_key
from opaque_keys.edx.keys import CourseKey
from .models import GeneratedCertificate
log = logging.getLogger(__name__)
class CertificateService(object):
"""
User Certificate service
"""
def invalidate_certificate(self, user_id, course_key_or_id):
"""
Invalidate the user certificate in a given course if it exists.
"""
course_key = _get_key(course_key_or_id, CourseKey)
try:
generated_certificate = GeneratedCertificate.objects.get(
user=user_id,
course_id=course_key
)
generated_certificate.invalidate()
log.info(
u'Certificate invalidated for user %d in course %s',
user_id,
course_key
)
except ObjectDoesNotExist:
log.warning(
u'Invalidation failed because a certificate for user %d in course %s does not exist.',
user_id,
course_key
)
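# A hypothetical usage sketch (ids assumed): a caller holding a user id and a
# course key string could invalidate the learner's certificate like so:
#
#     service = CertificateService()
#     service.invalidate_certificate(
#         user_id=42, course_key_or_id='course-v1:edX+DemoX+Demo_Course')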
|
kaste/mockito-python
|
refs/heads/master
|
tests/staticmethods_test.py
|
1
|
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .test_base import TestBase
from mockito import when, verify, unstub, any
from mockito.verification import VerificationError
class Dog:
@staticmethod
def bark():
return "woof"
@staticmethod
def barkHardly(*args):
return "woof woof"
class Cat:
@staticmethod
def meow():
return "miau"
class StaticMethodsTest(TestBase):
def tearDown(self):
unstub()
def testUnstubs(self):
when(Dog).bark().thenReturn("miau")
unstub()
self.assertEqual("woof", Dog.bark())
# TODO decent test case please :) without testing irrelevant implementation
# details
def testUnstubShouldPreserveMethodType(self):
when(Dog).bark().thenReturn("miau!")
unstub()
self.assertTrue(isinstance(Dog.__dict__.get("bark"), staticmethod))
def testStubs(self):
self.assertEqual("woof", Dog.bark())
when(Dog).bark().thenReturn("miau")
self.assertEqual("miau", Dog.bark())
def testStubsConsecutiveCalls(self):
when(Dog).bark().thenReturn(1).thenReturn(2)
self.assertEqual(1, Dog.bark())
self.assertEqual(2, Dog.bark())
self.assertEqual(2, Dog.bark())
def testStubsWithArgs(self):
self.assertEqual("woof woof", Dog.barkHardly(1, 2))
when(Dog).barkHardly(1, 2).thenReturn("miau")
self.assertEqual("miau", Dog.barkHardly(1, 2))
    def testStubsButDoesNotMatchArguments(self):
self.assertEqual("woof woof", Dog.barkHardly(1, "anything"))
when(Dog, strict=False).barkHardly(1, 2).thenReturn("miau")
self.assertEqual(None, Dog.barkHardly(1))
def testStubsMultipleClasses(self):
when(Dog).barkHardly(1, 2).thenReturn(1)
when(Dog).bark().thenReturn(2)
when(Cat).meow().thenReturn(3)
self.assertEqual(1, Dog.barkHardly(1, 2))
self.assertEqual(2, Dog.bark())
self.assertEqual(3, Cat.meow())
unstub()
self.assertEqual("woof", Dog.bark())
self.assertEqual("miau", Cat.meow())
    def testVerifiesSuccessfully(self):
when(Dog).bark().thenReturn("boo")
Dog.bark()
verify(Dog).bark()
def testVerifiesWithArguments(self):
when(Dog).barkHardly(1, 2).thenReturn("boo")
Dog.barkHardly(1, 2)
verify(Dog).barkHardly(1, any())
def testFailsVerification(self):
when(Dog).bark().thenReturn("boo")
Dog.bark()
self.assertRaises(VerificationError, verify(Dog).barkHardly, (1, 2))
def testFailsOnInvalidArguments(self):
when(Dog).bark().thenReturn("boo")
Dog.barkHardly(1, 2)
self.assertRaises(VerificationError, verify(Dog).barkHardly, (1, 20))
def testFailsOnNumberOfCalls(self):
when(Dog).bark().thenReturn("boo")
Dog.bark()
self.assertRaises(VerificationError, verify(Dog, times=2).bark)
def testStubsAndVerifies(self):
when(Dog).bark().thenReturn("boo")
self.assertEqual("boo", Dog.bark())
verify(Dog).bark()
def testStubsTwiceAndUnstubs(self):
when(Dog).bark().thenReturn(1)
when(Dog).bark().thenReturn(2)
self.assertEqual(2, Dog.bark())
unstub()
self.assertEqual("woof", Dog.bark())
def testDoesNotVerifyStubbedCalls(self):
when(Dog).bark().thenReturn(1)
verify(Dog, times=0).bark()
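# A minimal sketch (assumption: TestBase is unittest-compatible) so the file
# can also be run directly rather than through a test runner.
if __name__ == '__main__':
    import unittest
    unittest.main()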
|
syl20bnr/nupic
|
refs/heads/master
|
nupic/regions/ImageSensorFilters/Rotation2D.py
|
17
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from PIL import Image
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Rotation2D(BaseFilter):
"""
  Creates rotated versions of the image.
"""
def __init__(self, angles=[0], expand=False, targetRatio=None,
highQuality=True):
"""
angles -- List of angles by which to rotate, in degrees.
expand -- Whether to expand the output image to contain the entire
rotated image. If False, the output image will match the dimensions of
the input image, but cropping may occur.
targetRatio -- Ratio of the sensor. If specified, used if expand == False
to grow the image to the target ratio to avoid unnecessary clipping.
    highQuality -- Whether to use bicubic interpolation for rotating,
      instead of nearest neighbor.
"""
BaseFilter.__init__(self)
self.angles = angles
self.expand = expand
self.targetRatio = targetRatio
self.highQuality = highQuality
if not expand:
for i, angle in enumerate(angles):
if angle != 0 and angle % 90 == 0:
angles[i] -= .01 # Oh, PIL...
def process(self, image):
"""
image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
if not self.expand and self.targetRatio:
# Pad the image to the aspect ratio of the sensor
# This allows us to rotate in expand=False without cutting off parts
# of the image unnecessarily
# Unlike expand=True, the object doesn't get smaller
ratio = (image.size[0] / float(image.size[1]))
if ratio < self.targetRatio:
# Make image wider
size = (int(image.size[0] * self.targetRatio / ratio), image.size[1])
newImage = Image.new('LA', size, (self.background, 0))
newImage.paste(image, ((newImage.size[0] - image.size[0])/2, 0))
image = newImage
elif ratio > self.targetRatio:
# Make image taller
size = (image.size[0], int(image.size[1] * ratio / self.targetRatio))
newImage = Image.new('LA', size, (self.background, 0))
newImage.paste(image, (0, (newImage.size[1] - image.size[1])/2))
image = newImage
if self.highQuality:
resample = Image.BICUBIC
else:
resample = Image.NEAREST
outputs = []
for angle in self.angles:
# Rotate the image, which expands it and pads it with black and a 0
# alpha value
rotatedImage = image.rotate(angle,
resample=resample,
expand=self.expand)
# Create a new larger image to hold the rotated image
# It is filled with the background color and an alpha value of 0
outputImage = Image.new('LA', rotatedImage.size, (self.background, 0))
# Paste the rotated image into the new image, using the rotated image's
# alpha channel as a mask
# This effectively just fills the area around the rotation with the
# background color, and imports the alpha channel from the rotated image
outputImage.paste(rotatedImage, None, rotatedImage.split()[1])
outputs.append(outputImage)
return outputs
def getOutputCount(self):
"""
Return the number of images returned by each call to process().
If the filter creates multiple simultaneous outputs, return a tuple:
(outputCount, simultaneousOutputCount).
"""
return len(self.angles)
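# A hypothetical usage sketch (sizes and angles assumed, BaseFilter defaults
# apply): rotate an 'LA' image by three angles without expanding, padding
# toward a 4:3 sensor ratio first:
#
#     from PIL import Image
#     f = Rotation2D(angles=[-15, 0, 15], expand=False, targetRatio=4 / 3.0)
#     outputs = f.process(Image.new('LA', (64, 48), (128, 255)))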
|
xifle/greensc
|
refs/heads/master
|
tools/scons/scons-local-2.0.1/SCons/Options/PathOption.py
|
61
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/PathOption.py 5134 2010/08/16 23:02:40 bdeegan"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
class _PathOptionClass(object):
def warn(self):
global warned
if not warned:
msg = "The PathOption() function is deprecated; use the PathVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
def __call__(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable(*args, **kw)
def PathAccept(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable.PathAccept(*args, **kw)
def PathIsDir(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable.PathIsDir(*args, **kw)
def PathIsDirCreate(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable.PathIsDirCreate(*args, **kw)
def PathIsFile(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable.PathIsFile(*args, **kw)
def PathExists(self, *args, **kw):
self.warn()
return SCons.Variables.PathVariable.PathExists(*args, **kw)
PathOption = _PathOptionClass()
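# A hedged usage sketch (variable names assumed): legacy code keeps working
# but emits a DeprecatedOptionsWarning on first use; new code should call
# SCons.Variables.PathVariable directly.
#
#     opts.Add(PathOption('qtdir', 'where Qt is installed', '/usr/lib/qt'))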
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bristy/login_app_flask
|
refs/heads/master
|
env/lib/python2.7/site-packages/flask/testsuite/test_apps/moduleapp/apps/admin/__init__.py
|
629
|
from flask import Module, render_template
admin = Module(__name__, url_prefix='/admin')
@admin.route('/')
def index():
return render_template('admin/index.html')
@admin.route('/index2')
def index2():
return render_template('./admin/index.html')
|
larsbutler/zpa
|
refs/heads/master
|
publish/yaml/representer.py
|
360
|
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null',
u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = u'.nan'
elif data == self.inf_value:
value = u'.inf'
elif data == -self.inf_value:
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if u'.' not in value and u'e' in value:
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:python/str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node . Otherwise we produce a
# !!python/object/new node.
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
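    # A hypothetical illustration (class name assumed): a classic-class
    # instance with state {'x': 1, 'y': 2} and no __getinitargs__ is emitted
    # as a mapping node tagged '!!python/object:somemodule.Point'.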
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
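# Editor's illustrative sketch (not part of PyYAML): for a new-style object
# whose __reduce_ex__(2) returns (copy_reg.__newobj__, (cls,), state, None,
# None), the branch above strips the __newobj__ wrapper; with no remaining
# args and a dict state this collapses to a plain !!python/object node, e.g.
#   class Point(object):
#       def __init__(self, x, y): self.x, self.y = x, y
#   yaml.dump(Point(1, 2))  ->  "!!python/object:__main__.Point {x: 1, y: 2}"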
Representer.add_representer(str,
Representer.represent_str)
Representer.add_representer(unicode,
Representer.represent_unicode)
Representer.add_representer(long,
Representer.represent_long)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.ClassType,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
Representer.represent_object)
|
ahmadshahwan/cohorte-runtime
|
refs/heads/master
|
python/src/lib/python/unidecode/x0bc.py
|
253
|
data = (
'mil', # 0x00
'milg', # 0x01
'milm', # 0x02
'milb', # 0x03
'mils', # 0x04
'milt', # 0x05
'milp', # 0x06
'milh', # 0x07
'mim', # 0x08
'mib', # 0x09
'mibs', # 0x0a
'mis', # 0x0b
'miss', # 0x0c
'ming', # 0x0d
'mij', # 0x0e
'mic', # 0x0f
'mik', # 0x10
'mit', # 0x11
'mip', # 0x12
'mih', # 0x13
'ba', # 0x14
'bag', # 0x15
'bagg', # 0x16
'bags', # 0x17
'ban', # 0x18
'banj', # 0x19
'banh', # 0x1a
'bad', # 0x1b
'bal', # 0x1c
'balg', # 0x1d
'balm', # 0x1e
'balb', # 0x1f
'bals', # 0x20
'balt', # 0x21
'balp', # 0x22
'balh', # 0x23
'bam', # 0x24
'bab', # 0x25
'babs', # 0x26
'bas', # 0x27
'bass', # 0x28
'bang', # 0x29
'baj', # 0x2a
'bac', # 0x2b
'bak', # 0x2c
'bat', # 0x2d
'bap', # 0x2e
'bah', # 0x2f
'bae', # 0x30
'baeg', # 0x31
'baegg', # 0x32
'baegs', # 0x33
'baen', # 0x34
'baenj', # 0x35
'baenh', # 0x36
'baed', # 0x37
'bael', # 0x38
'baelg', # 0x39
'baelm', # 0x3a
'baelb', # 0x3b
'baels', # 0x3c
'baelt', # 0x3d
'baelp', # 0x3e
'baelh', # 0x3f
'baem', # 0x40
'baeb', # 0x41
'baebs', # 0x42
'baes', # 0x43
'baess', # 0x44
'baeng', # 0x45
'baej', # 0x46
'baec', # 0x47
'baek', # 0x48
'baet', # 0x49
'baep', # 0x4a
'baeh', # 0x4b
'bya', # 0x4c
'byag', # 0x4d
'byagg', # 0x4e
'byags', # 0x4f
'byan', # 0x50
'byanj', # 0x51
'byanh', # 0x52
'byad', # 0x53
'byal', # 0x54
'byalg', # 0x55
'byalm', # 0x56
'byalb', # 0x57
'byals', # 0x58
'byalt', # 0x59
'byalp', # 0x5a
'byalh', # 0x5b
'byam', # 0x5c
'byab', # 0x5d
'byabs', # 0x5e
'byas', # 0x5f
'byass', # 0x60
'byang', # 0x61
'byaj', # 0x62
'byac', # 0x63
'byak', # 0x64
'byat', # 0x65
'byap', # 0x66
'byah', # 0x67
'byae', # 0x68
'byaeg', # 0x69
'byaegg', # 0x6a
'byaegs', # 0x6b
'byaen', # 0x6c
'byaenj', # 0x6d
'byaenh', # 0x6e
'byaed', # 0x6f
'byael', # 0x70
'byaelg', # 0x71
'byaelm', # 0x72
'byaelb', # 0x73
'byaels', # 0x74
'byaelt', # 0x75
'byaelp', # 0x76
'byaelh', # 0x77
'byaem', # 0x78
'byaeb', # 0x79
'byaebs', # 0x7a
'byaes', # 0x7b
'byaess', # 0x7c
'byaeng', # 0x7d
'byaej', # 0x7e
'byaec', # 0x7f
'byaek', # 0x80
'byaet', # 0x81
'byaep', # 0x82
'byaeh', # 0x83
'beo', # 0x84
'beog', # 0x85
'beogg', # 0x86
'beogs', # 0x87
'beon', # 0x88
'beonj', # 0x89
'beonh', # 0x8a
'beod', # 0x8b
'beol', # 0x8c
'beolg', # 0x8d
'beolm', # 0x8e
'beolb', # 0x8f
'beols', # 0x90
'beolt', # 0x91
'beolp', # 0x92
'beolh', # 0x93
'beom', # 0x94
'beob', # 0x95
'beobs', # 0x96
'beos', # 0x97
'beoss', # 0x98
'beong', # 0x99
'beoj', # 0x9a
'beoc', # 0x9b
'beok', # 0x9c
'beot', # 0x9d
'beop', # 0x9e
'beoh', # 0x9f
'be', # 0xa0
'beg', # 0xa1
'begg', # 0xa2
'begs', # 0xa3
'ben', # 0xa4
'benj', # 0xa5
'benh', # 0xa6
'bed', # 0xa7
'bel', # 0xa8
'belg', # 0xa9
'belm', # 0xaa
'belb', # 0xab
'bels', # 0xac
'belt', # 0xad
'belp', # 0xae
'belh', # 0xaf
'bem', # 0xb0
'beb', # 0xb1
'bebs', # 0xb2
'bes', # 0xb3
'bess', # 0xb4
'beng', # 0xb5
'bej', # 0xb6
'bec', # 0xb7
'bek', # 0xb8
'bet', # 0xb9
'bep', # 0xba
'beh', # 0xbb
'byeo', # 0xbc
'byeog', # 0xbd
'byeogg', # 0xbe
'byeogs', # 0xbf
'byeon', # 0xc0
'byeonj', # 0xc1
'byeonh', # 0xc2
'byeod', # 0xc3
'byeol', # 0xc4
'byeolg', # 0xc5
'byeolm', # 0xc6
'byeolb', # 0xc7
'byeols', # 0xc8
'byeolt', # 0xc9
'byeolp', # 0xca
'byeolh', # 0xcb
'byeom', # 0xcc
'byeob', # 0xcd
'byeobs', # 0xce
'byeos', # 0xcf
'byeoss', # 0xd0
'byeong', # 0xd1
'byeoj', # 0xd2
'byeoc', # 0xd3
'byeok', # 0xd4
'byeot', # 0xd5
'byeop', # 0xd6
'byeoh', # 0xd7
'bye', # 0xd8
'byeg', # 0xd9
'byegg', # 0xda
'byegs', # 0xdb
'byen', # 0xdc
'byenj', # 0xdd
'byenh', # 0xde
'byed', # 0xdf
'byel', # 0xe0
'byelg', # 0xe1
'byelm', # 0xe2
'byelb', # 0xe3
'byels', # 0xe4
'byelt', # 0xe5
'byelp', # 0xe6
'byelh', # 0xe7
'byem', # 0xe8
'byeb', # 0xe9
'byebs', # 0xea
'byes', # 0xeb
'byess', # 0xec
'byeng', # 0xed
'byej', # 0xee
'byec', # 0xef
'byek', # 0xf0
'byet', # 0xf1
'byep', # 0xf2
'byeh', # 0xf3
'bo', # 0xf4
'bog', # 0xf5
'bogg', # 0xf6
'bogs', # 0xf7
'bon', # 0xf8
'bonj', # 0xf9
'bonh', # 0xfa
'bod', # 0xfb
'bol', # 0xfc
'bolg', # 0xfd
'bolm', # 0xfe
'bolb', # 0xff
)
|
Arelle/Arelle
|
refs/heads/master
|
arelle/CntlrQuickBooks.py
|
3
|
'''
Created on Jan 19, 2012
This module implements Quick Books server mode
@author: Mark V Systems Limited
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
from lxml import etree
import uuid, io, datetime
from arelle import XmlUtil
clientVersion = None
userName = None
sessions = {} # use when interactive session started by Quickbooks side (not used now)
qbRequests = [] # used by rest API or GUI requests for QB data
qbRequestStatus = {}
xbrlInstances = {}
cntlr = None
# report in url path request and type of query to QB
supportedQbReports = {'trialBalance':'GeneralSummary',
'generalLedger':'GeneralDetail',
'journal':'GeneralDetail'
}
# some reports don't provide the needed columns, request explicitly
includeQbColumns = {'trialBalance': '',
'generalLedger': '''
<IncludeColumn>TxnType</IncludeColumn>
<IncludeColumn>Date</IncludeColumn>
<IncludeColumn>RefNumber</IncludeColumn>
<IncludeColumn>Name</IncludeColumn>
<IncludeColumn>Memo</IncludeColumn>
<IncludeColumn>SplitAccount</IncludeColumn>
<IncludeColumn>Credit</IncludeColumn>
<IncludeColumn>Debit</IncludeColumn>
<IncludeColumn>RunningBalance</IncludeColumn>
''',
'journal': ''
}
glEntriesType = {'trialBalance':'trialbalance',
'generalLedger':'balance',
'journal':'journal'
}
qbTxnTypeToGL = {# QB code is a case-insensitive comparison (lowercase, some QBs do not have expected camel case)
'bill':'voucher', # bills from vendors
'billpayment':'check', # credits from vendors
'billpaymentcheck':'check', # payments to vendors from bank account
'billpmt-check':'check', # QB 2009
'billpaymentcreditcard':'payment-other', # payments to vendor from credit card account
'buildassembly':'other',
'charge':'other',
'check':'check', # checks written on bank account
'credit':'credit-memo',
'creditcardcharge':'payment-other', # credit card account charge
'creditcardcredit':'other', # credit card account credit
'creditmemo':'credit-memo', # credit memo to customer
'deposit':'check', # GL calls it check whether sent or received
'discount':'credit-memo',
'estimate':'other',
'generaljournal':'manual-adjustment',
'inventoryadjustment':'other',
'invoice':'invoice',
'itemreceipt':'receipt',
'journalentry':'manual-adjustment',
'liabilitycheck': 'check',
'payment': 'check',
'paycheck': 'check',
'purchaseorder':'order-vendor',
'receivepayment':'payment-other',
'salesorder':'order-customer',
'salesreceipt':'other',
'salestaxpaymentcheck':'check',
'statementcharge':'other',
'transfer':'payment-other',
'vendorcredit':'credit-memo',
}
def server(_cntlr, soapFile, requestUrlParts):
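    # Editor's note: this services the QuickBooks Web Connector SOAP callbacks.
    # A typical polling cycle is serverVersion/clientVersion -> authenticate
    # (returns a session ticket) -> sendRequestXML (we hand QB a qbXML report
    # query) -> receiveResponseXML (QB returns the report, converted below to
    # XBRL-GL) -> getLastError / closeConnection.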
global cntlr
if cntlr is None: cntlr = _cntlr
soapDocument = etree.parse(soapFile)
soapBody = soapDocument.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")
if soapBody is None:
return ""
else:
for request in soapBody.iterchildren():
requestName = request.tag.partition("}")[2]
print ("request {0}".format(requestName))
response = None
if request.tag == "{http://developer.intuit.com/}serverVersion":
response = "Arelle 1.0"
elif request.tag == "{http://developer.intuit.com/}clientVersion":
global clientVersion
clientVersion = request.find("{http://developer.intuit.com/}strVersion").text
elif request.tag == "{http://developer.intuit.com/}authenticate":
#global userName # not needed for now
#userName = request.find("{http://developer.intuit.com/}strUserName").text
#password is ignored
ticket = str(uuid.uuid1())
global qbRequests
if qbRequests: # start a non-interactive session
response = [ticket, ""]
sessions[ticket] = qbRequests
qbRequests = []
else:
# to start an interactive session automatically from QB side, uncomment
#response = [ticket, "" if not sessions else "none"] # don't start session if one already there
#sessions[ticket] = [{"request":"StartInteractiveMode"}]
response = [ticket, "none"] # response to not start interactive mode
elif request.tag == "{http://developer.intuit.com/}sendRequestXML":
ticket = request.find("{http://developer.intuit.com/}ticket").text
_qbRequests = sessions.get(ticket)
if _qbRequests:
_qbRequest = _qbRequests[0]
action = _qbRequest["request"]
if action == "StartInteractiveMode":
response = ''
elif action in supportedQbReports:
# add company info to request dict
_qbRequest["strHCPResponse"] = request.find("{http://developer.intuit.com/}strHCPResponse").text
response = ('''<?xml version="1.0"?>
<?qbxml version="8.0"?>
<QBXML>
<QBXMLMsgsRq onError="stopOnError">
<{1}ReportQueryRq>
<{1}ReportType>{0}</{1}ReportType>
<ReportPeriod>
<FromReportDate>{2}</FromReportDate>
<ToReportDate>{3}</ToReportDate>
</ReportPeriod>{4}
</{1}ReportQueryRq>
</QBXMLMsgsRq>
</QBXML>''').format(action[0].upper() + action[1:],
supportedQbReports[action],
_qbRequest["fromDate"],
_qbRequest["toDate"],
includeQbColumns[action],
).replace("&","&").replace("<","<").replace(">",">")
elif request.tag == "{http://developer.intuit.com/}connectionError":
ticket = request.find("{http://developer.intuit.com/}ticket").text
hresult = request.find("{http://developer.intuit.com/}hresult").text
if hresult and hresult.startswith("0x"):
hresult = hresult[2:] # remove 0x if present
message = request.find("{http://developer.intuit.com/}message").text
print ("connection error message: [{0}] {1}".format(hresult, message))
_qbRequests = sessions.get(ticket)
if _qbRequests:
qbRequestTicket = _qbRequests[0]["ticket"]
qbRequestStatus[qbRequestTicket] = "ConnectionErrorMessage: [{0}] {1}".format(hresult, message)
response = "done"
elif request.tag == "{http://developer.intuit.com/}receiveResponseXML":
ticket = request.find("{http://developer.intuit.com/}ticket").text
responseXml = (request.find("{http://developer.intuit.com/}response").text or "").replace("&lt;","<").replace("&gt;",">")
_qbRequests = sessions.get(ticket)
if _qbRequests:
if responseXml:
processQbResponse(_qbRequests[0], responseXml)
else:
print ("no response from QuickBooks")
response = str(100 / len(_qbRequests))
sessions[ticket] = _qbRequests[1:]
elif request.tag == "{http://developer.intuit.com/}getLastError":
ticket = request.find("{http://developer.intuit.com/}ticket").text
_qbRequests = sessions.get(ticket)
if _qbRequests:
_qbRequest = _qbRequests[0]
action = _qbRequest["request"]
if action == "StartInteractiveMode":
response = "Interactive mode"
else:
response = "NoOp"
else:
response = "NoOp"
elif request.tag == "{http://developer.intuit.com/}getInteractiveURL":
ticket = request.find("{http://developer.intuit.com/}wcTicket").text
response = "{0}://{1}/quickbooks/server.html?ticket={2}".format(
requestUrlParts.scheme,
requestUrlParts.netloc,
ticket)
sessions[ticket] = [{"request":"WaitForInput"}]
elif request.tag == "{http://developer.intuit.com/}isInteractiveDone":
ticket = request.find("{http://developer.intuit.com/}wcTicket").text
_qbRequests = sessions.get(ticket)
if _qbRequests:
_qbRequest = _qbRequests[0]
action = _qbRequest["request"]
if action == "Done":
response = "Done"
else:
response = "Not done"
else:
response = "Not done"
elif request.tag == "{http://developer.intuit.com/}interactiveRejected":
ticket = request.find("{http://developer.intuit.com/}wcTicket").text
response = "Interactive session timed out or canceled"
sessions.pop(ticket, None)
elif request.tag == "{http://developer.intuit.com/}closeConnection":
response = "OK"
soapResponse = qbResponse(requestName, response)
return soapResponse
def qbRequest(qbReport, fromDate, toDate, file):
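    # Editor's note: queues a report request on behalf of the REST API or GUI;
    # the returned ticket can be polled via qbRequestStatus, and the finished
    # instance is recorded in xbrlInstances under the same ticket.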
ticket = str(uuid.uuid1())
qbRequests.append({"ticket":ticket,
"request":qbReport,
"fromDate":fromDate,
"toDate":toDate,
"xbrlFile":file})
qbRequestStatus[ticket] = _("Waiting for QuickBooks")
return ticket
def qbResponse(responseName, content=None):
if not content:
result = ""
elif isinstance(content, list):
result = '<{0}Result>{1}</{0}Result>'.format(
responseName,
'\n'.join("<string>{0}</string>".format(l) for l in content))
else:
result = '<{0}Result>{1}</{0}Result>'.format(responseName, content)
return ('<?xml version="1.0" encoding="utf-8"?>'
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">'
'<soap:Body>'
'<{0}Response xmlns="http://developer.intuit.com/">'
'{1}'
'</{0}Response>'
'</soap:Body>'
'</soap:Envelope>'.format(responseName, result))
def docEltText(doc, tag, defaultValue=""):
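    # Editor's note: returns the text of the first element named `tag`
    # anywhere under `doc`, or defaultValue when no such element exists.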
for elt in doc.iter(tag):
return elt.text
return defaultValue
def processQbResponse(qbRequest, responseXml):
from arelle import ModelXbrl, XbrlConst
from arelle.ModelValue import qname
ticket = qbRequest["ticket"]
qbRequestStatus[ticket] = _("Generating XBRL-GL from QuickBooks response")
qbReport = qbRequest["request"]
xbrlFile = qbRequest["xbrlFile"]
fromDate = qbRequest["fromDate"]
toDate = qbRequest["toDate"]
strHCPResponse = qbRequest.get("strHCPResponse", "")
# uncomment to dump out QB responses
'''
with open("c:/temp/test.xml", "w") as fh:
fh.write(responseXml)
with open("c:/temp/testC.xml", "w") as fh:
fh.write(strHCPResponse)
# qb responses dump
'''
companyQbDoc = etree.parse(io.StringIO(initial_value=strHCPResponse))
responseQbDoc = etree.parse(io.StringIO(initial_value=responseXml))
# columns table
colTypeId = {}
colIdType = {}
for colDescElt in responseQbDoc.iter("ColDesc"):
colTypeElt = colDescElt.find("ColType")
if colTypeElt is not None:
colID = colDescElt.get("colID")
colType = colTypeElt.text
if colType == "Amount": # check if there's a credit or debit colTitle
for colTitleElt in colDescElt.iter("ColTitle"):
title = colTitleElt.get("value")
if title in ("Credit", "Debit"):
colType = title
break
colTypeId[colType] = colID
colIdType[colID] = colType
# open new result instance document
# load GL palette file (no instance)
instance = cntlr.modelManager.load("http://www.xbrl.org/taxonomy/int/gl/2006-10-25/plt/case-c-b-m-u-t/gl-plt-2006-10-25.xsd")
if xbrlFile is None:
xbrlFile = "sampleInstance.xbrl"
saveInstance = False
else:
saveInstance = True
instance.createInstance(xbrlFile) # creates an instance as this modelXbrl's entry point
newCntx = instance.createContext("http://www.xbrl.org/xbrlgl/sample", "SAMPLE",
"instant", None, datetime.date.today() + datetime.timedelta(1), # today midnight
None, {}, [], [], afterSibling=ModelXbrl.AUTO_LOCATE_ELEMENT)
monetaryUnit = qname(XbrlConst.iso4217, "iso4217:USD")
newUnit = instance.createUnit([monetaryUnit],[], afterSibling=ModelXbrl.AUTO_LOCATE_ELEMENT)
nonNumAttr = [("contextRef", newCntx.id)]
monetaryAttr = [("contextRef", newCntx.id), ("unitRef", newUnit.id), ("decimals", "2")]
isoLanguage = qname("{http://www.xbrl.org/2005/iso639}iso639:en")
# root of GL is accounting entries tuple
xbrlElt = instance.modelDocument.xmlRootElement
'''The container for XBRL GL, accountingEntries, is not the root of an XBRL GL file - the root,
as with all XBRL files, is xbrl. This means that a single XBRL GL file can store one or more
virtual XBRL GL files, through one or more accountingEntries structures with data inside.
The primary key to understanding an XBRL GL file is the entriesType. A single physical XBRL GL
file can have multiple accountingEntries structures to represent both transactions and
master files; the differences are signified by the appropriate entriesType enumerated values.'''
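    # Editor's illustrative sketch: the instance assembled below has the shape
    #   <xbrl>
    #     <gl-cor:accountingEntries>
    #       <gl-cor:documentInfo><gl-cor:entriesType>journal</gl-cor:entriesType>...</gl-cor:documentInfo>
    #       <gl-cor:entityInformation>...</gl-cor:entityInformation>
    #       <gl-cor:entryHeader>
    #         <gl-cor:entryDetail><gl-cor:account>...</gl-cor:account><gl-cor:amount>...</gl-cor:amount></gl-cor:entryDetail>
    #       </gl-cor:entryHeader>
    #     </gl-cor:accountingEntries>
    #   </xbrl>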
accountingEntries = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountingEntries"))
# Because entriesType is strongly suggested, documentInfo will be required
docInfo = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:documentInfo"), parent=accountingEntries)
# This field, entriesType, provides the automated guidance on the purpose of the XBRL GL information.
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entriesType"), parent=docInfo, attributes=nonNumAttr,
text=glEntriesType[qbReport])
'''Like a serial number, this field, uniqueID, provides a place to uniquely identify/track
a series of entries. It is less relevant for ad-hoc reports. XBRL GL provides for later
correction through replacement or augmentation of transferred information.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:uniqueID"), parent=docInfo, attributes=nonNumAttr,
text="001")
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:language"), parent=docInfo, attributes=nonNumAttr,
text=XmlUtil.addQnameValue(xbrlElt, isoLanguage))
'''The date associated with the creation of the data reflected within the associated
accountingEntries section. Somewhat like a "printed date" on a paper report'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:creationDate"), parent=docInfo, attributes=nonNumAttr,
text=str(datetime.date.today()))
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:periodCoveredStart"), parent=docInfo, attributes=nonNumAttr,
text=fromDate)
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:periodCoveredEnd"), parent=docInfo, attributes=nonNumAttr,
text=toDate)
instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:sourceApplication"), parent=docInfo, attributes=nonNumAttr,
text=docEltText(companyQbDoc, "ProductName","QuickBooks (version not known)"))
instance.createFact(qname("{http://www.xbrl.org/int/gl/muc/2006-10-25}gl-muc:defaultCurrency"), parent=docInfo, attributes=nonNumAttr,
text=XmlUtil.addQnameValue(xbrlElt, monetaryUnit))
'''Typically, an export from an accounting system does not carry with it information
specifically about the company. However, the name of the company would be a very good
thing to include with the file, making the entityInformation tuple necessary.'''
entityInfo = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entityInformation"), parent=accountingEntries)
'''The name of the company would be a very good thing to include with the file;
this structure and its content are where that would be stored.'''
orgIds = instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationIdentifiers"), parent=entityInfo)
instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationIdentifier"), parent=orgIds, attributes=nonNumAttr,
text=docEltText(companyQbDoc, "CompanyName"))
instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationDescription"), parent=orgIds, attributes=nonNumAttr,
text=docEltText(companyQbDoc, "LegalCompanyName"))
if qbReport == "trialBalance":
qbTxnType = "trialbalance"
else:
qbTxnType = None
qbTxnNumber = None
qbDate = None
qbRefNumber = None
isFirst = True
entryNumber = 1
lineNumber = 1
for dataRowElt in responseQbDoc.iter("DataRow"):
cols = dict((colIdType[colElt.get("colID")], colElt.get("value")) for colElt in dataRowElt.iter("ColData"))
if qbReport == "trialBalance" and "Label" in cols:
cols["SplitAccount"] = cols["Label"]
hasRowDataAccount = False
for rowDataElt in dataRowElt.iter("RowData"):
rowType = rowDataElt.get("rowType")
if rowType == "account":
hasRowDataAccount = True
if "SplitAccount" not in cols:
cols["SplitAccount"] = rowDataElt.get("value")
if qbReport == "trialBalance" and not hasRowDataAccount:
continue # skip total lines or others without account information
elif qbReport in ("generalLedger", "journal"):
if "TxnType" not in cols:
continue # not a reportable entry
# entry header fields only on new item that generates an entry header
if "TxnType" in cols:
qbTxnType = cols["TxnType"]
if "TxnNumber" in cols:
qbTxnNumber = cols["TxnNumber"]
if "Date" in cols:
qbDate = cols["Date"]
if "RefNumber" in cols:
qbRefNumber = cols["RefNumber"]
# entry details provided on every entry
qbName = cols.get("Name")
qbMemo = cols.get("Memo")
qbAccount = cols.get("SplitAccount")
qbAmount = cols.get("Amount")
qbDebitAmount = cols.get("Debit")
qbCreditAmount = cols.get("Credit")
runningBalance = cols.get("RunningBalance")
if qbAmount is not None:
drCrCode = None
amt = qbAmount
elif qbDebitAmount is not None:
drCrCode = "D"
amt = qbDebitAmount
elif qbCreditAmount is not None:
drCrCode = "C"
amt = qbCreditAmount
else:
# no amount, skip this transaction
continue
if isFirst or qbTxnNumber:
'''Journal entries require entry in entryHeader and entryDetail.
Few files can be represented using only documentInfo and entityInformation sections,
but it is certainly possible.'''
entryHdr = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryHeader"), parent=accountingEntries)
#instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:enteredBy"), parent=entryHdr, attributes=nonNumAttr, text="")
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:enteredDate"), parent=entryHdr, attributes=nonNumAttr,
text=str(datetime.date.today()))
'''This is an enumerated entry that ties the source journal from the reporting
organization to a fixed list that helps in data interchange.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:sourceJournalID"), parent=entryHdr, attributes=nonNumAttr,
text="gj")
'''Since sourceJournalID is enumerated (you must pick one of the entries already
identified within XBRL GL), sourceJournalDescription lets you capture the actual
code or term used to describe the source journal by the organization.'''
# instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:sourceJournalDescription"), parent=entryHdr, attributes=nonNumAttr, text="JE")
'''An enumerated field to differentiate between details that represent actual accounting
entries - as opposed to entries for budget purposes, planning purposes, or other entries
that may not contribute to the financial statements.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryType"), parent=entryHdr, attributes=nonNumAttr,
text="standard")
'''When capturing journal entries, you have a series of debits and credits that (normally)
add up to zero. The hierarchical nature of XBRL GL keeps the entry detail lines associated
with the entry header by a parent-child relationship. The unique identifier of each entry
is entered here.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryNumber"), parent=entryHdr, attributes=nonNumAttr,
text=str(entryNumber))
entryNumber += 1
# The reason for making an entry goes here.
if qbRefNumber:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryComment"), parent=entryHdr, attributes=nonNumAttr,
text=qbRefNumber)
'''Individual lines of journal entries will normally require their own entryDetail section -
one primary amount per entryDetail line. However, you can list different accounts within
the same entryDetail line that are associated with that amount. For example, if you
capitalize for US GAAP and expense for IFRS'''
entryDetail = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryDetail"), parent=entryHdr)
# A unique identifier for each entry detail line within an entry header, this should at the least be a counter.
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:lineNumber"), parent=entryDetail, attributes=nonNumAttr,
text=str(lineNumber))
lineNumber += 1
'''If account information is represented elsewhere or as a master file, some of the
fields below would not need to be here (signified by *)'''
account = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:account"), parent=entryDetail)
'''The account number is the basis for posting journal entries. In some cases,
accounting systems used by small organizations do not use account numbers/codes,
but only use a descriptive name for the account.'''
# QB does not have account numbers
# instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountMainID"), parent=account, attributes=nonNumAttr, text="10100")
'''In most cases, the description is given to help a human reader; the accountMainID would
be sufficient for data exchange purposes. As noted previously, some implementations use the
description as the primary identifier of the account.'''
if qbAccount:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountMainDescription"), parent=account, attributes=nonNumAttr,
text=qbAccount)
'''Accounts serve many purposes, and in a large company using more sophisticated software,
the company may wish to record the account used for the original entry and a separate
consolidating account. The Japanese system may require a counterbalancing account for
each line item. And an entry may be recorded differently for US GAAP, IFRS and other purposes.
This code is an enumerated code to help identify accounts for those purposes.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountPurposeCode"), parent=account, attributes=nonNumAttr,
text="usgaap")
'''In an international environment, the "chart of accounts" will include not only
traditional accounts, like Cash, Accounts Payable/Due to Creditors or Retained Earnings,
but also extensions to some of the accounts. Accounts Payable may be extended to
include the creditors/vendors themselves. Therefore, in XBRL GL, accounts can be
specifically identified as the "traditional" account, or to identify a customer,
vendor, employee, bank, job or fixed asset. While this may overlap with the customers,
vendors and employees of the identifier structure, fixed-assets in the measurable
structure, jobs in the jobInfo structure and other representations, they can also be
represented here as appropriate to the jurisdiction.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountType"), parent=account, attributes=nonNumAttr, text="account")
'''What is a journal entry without a (monetary) amount? While XBRL GL may usher in journal
entries that also incorporate quantities, to reflect the detail of business metrics, the
(monetary) amount is another key and obvious fields. XBRL GL has been designed to reflect
how popular accounting systems store amounts - some combination of a signed amount (e.g., 5, -10),
a separate sign (entered into signOfAmount) and a separate place to indicate the number is
associated with a debit or credit (debitCreditCode).'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:amount"), parent=entryDetail, attributes=monetaryAttr,
text=amt)
'''Depending on the originating system, this field may contain whether the amount is
associated with a debit or credit. Interpreting the number correctly for import requires
an understanding of the three related amount fields - amount, debitCreditCode and sign of amount.'''
if drCrCode:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:debitCreditCode"), parent=entryDetail, attributes=nonNumAttr,
text=drCrCode)
'''Depending on the originating system, this field may contain whether the amount is
signed (+ or -) separately from the amount field itself. Interpreting the number correctly
for import requires an understanding of the three related amount fields - amount,
debitCreditCode and sign of amount.'''
# instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:signOfAmount"), parent=entryDetail, attributes=nonNumAttr, text="+")
# This date is the accounting significance date, not the date that entries were actually entered or posted to the system.
if qbDate:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:postingDate"), parent=entryDetail, attributes=nonNumAttr,
text=qbDate)
if qbName or qbMemo:
identRef = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierReference"), parent=entryDetail)
if qbMemo:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierCode"), parent=identRef, attributes=nonNumAttr,
text=qbMemo)
if qbName:
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierDescription"), parent=identRef, attributes=nonNumAttr,
text=qbName)
#instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierType"), parent=identRef, attributes=nonNumAttr,
# text="V")
if qbReport != "trialBalance":
if qbTxnType: # not exactly same enumerations as expected by QB
cleanedQbTxnType = qbTxnType.replace(" ","").lower()
glDocType = qbTxnTypeToGL.get(cleanedQbTxnType) # try table lookup
if glDocType is None: # not in table
if cleanedQbTxnType.endswith("check"): # didn't convert, probably should be a check
glDocType = "check"
# TBD add more QB transactions here as they are discovered and not in table
else:
glDocType = qbTxnType # if all else fails pass through QB TxnType, it will fail GL validation and be noticed!
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:documentType"), parent=entryDetail, attributes=nonNumAttr,
text=glDocType)
'''This enumerated field is used to specifically state whether the entries have been
posted to the originating system or not.'''
instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:postingStatus"), parent=entryDetail, attributes=nonNumAttr,
text="posted")
# A comment at the individual entry detail level.
# instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:detailComment"), parent=entryDetail, attributes=nonNumAttr, text="Comment...")
isFirst = False
if saveInstance:
qbRequestStatus[ticket] = _("Saving XBRL-GL instance")
instance.saveInstance()
qbRequestStatus[ticket] = _("Done")
# TBD resolve errors
instance.errors = [] # TBD fix this
xbrlInstances[ticket] = instance.uuid
|
unseenlaser/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/importlib/test/extension/util.py
|
51
|
import imp
import os
import sys
PATH = None
EXT = None
FILENAME = None
NAME = '_testcapi'
_file_exts = [x[0] for x in imp.get_suffixes() if x[2] == imp.C_EXTENSION]
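# Editor's note: the loop below scans sys.path for a built _testcapi extension;
# raising StopIteration breaks out of both loops on the first hit, while the
# for-else resets all module-level names to None when nothing is found.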
try:
for PATH in sys.path:
for EXT in _file_exts:
FILENAME = NAME + EXT
FILEPATH = os.path.join(PATH, FILENAME)
if os.path.exists(FILEPATH):
raise StopIteration
else:
PATH = EXT = FILENAME = FILEPATH = None
except StopIteration:
pass
del _file_exts
|
ocefpaf/system-test
|
refs/heads/master
|
pelican-plugins/representative_image/__init__.py
|
65
|
from .representative_image import *
|
ricardosiri68/patchcap
|
refs/heads/master
|
PatchMan/patchman/controller/device_controller.py
|
1
|
from patchman.models import Device, DBSession
from formencode import validators
from formencode.schema import Schema
from pyramid.httpexceptions import (HTTPFound, HTTPNotFound)
from pyramid.renderers import render_to_response
from pyramid.view import view_config
from pyramid_uniform import Form, FormRenderer
from sqlalchemy.exc import IntegrityError
from webhelpers import paginate
from webhelpers.paginate import Page
from patchman.utils.supercontrol import *
from patchman.utils.wsdiscovery import *
from os import path
import glob
import logging
import transaction
log = logging.getLogger(__name__)
class OnvifDevice(object):
def __init__(self, name, ip, scope):
self.name = name
self.ip = ip
self.scope = scope
class DeviceForm(Schema):
filter_extra_fields = True
allow_extra_fields = True
name = validators.String(not_empty=True)
ip = validators.String(not_empty=False)
instream = validators.String(not_empty=True)
outstream = validators.String(not_empty=True)
username = validators.String(not_empty=False)
password = validators.String(not_empty=False)
roi = validators.String(not_empty=False)
@view_config(route_name="device_list")
def list(request):
"""devices list """
search = request.params.get("search", "")
sort = "name"
if request.GET.get("sort") == "name":
    sort = request.GET.get("sort")
direction = "asc"
if request.GET.get("direction") and request.GET.get("direction") in ["asc", "desc"]:
direction = request.GET.get("direction")
# db query
dbsession = DBSession()
query = dbsession.query(Device).\
filter(Device.name.like(search + "%")).\
order_by(sort + " " + direction)
# paginate
page_url = paginate.PageURL_WebOb(request)
devices = Page(query,
page=int(request.params.get("page", 1)),
items_per_page=10,
url=page_url)
if "partial" in request.params:
# Render the partial list page
return render_to_response("device/listPartial.html",
{"devices": devices},
request=request)
else:
# Render the full list page
return render_to_response("device/list.html",
{"devices": devices},
request=request)
@view_config(route_name="device_discover")
def discover(request):
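    # Editor's note: probes the LAN via WS-Discovery for ONVIF devices; each
    # hit exposes an endpoint reference (EPR), its transport addresses
    # (XAddrs) and scopes, which are wrapped in OnvifDevice for the template.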
wsd = WSDiscovery()
wsd.start()
#typeNVT = QName("http://www.onvif.org/ver10/network/wsdl","NetworkVideoTransmitter");
#ret = wsd.searchServices(types=[typeNVT])
ret = wsd.searchServices()
devices = []
for service in ret:
devices.append(OnvifDevice(name=service.getEPR(),
ip = service.getXAddrs(),
scope= service.getScopes()))
return render_to_response("device/discover.html",
{"devices": devices},
request=request)
@view_config(route_name="device_search")
def search(request):
"""devices list searching """
sort = request.GET.get("sort") if request.GET.get("sort") else "name"
direction = "desc" if request.GET.get("direction") == "asc" else "asc"
query = {"sort": sort, "direction": direction}
return HTTPFound(location=request.route_url("device_list", _query=query))
@view_config(route_name="device_new", renderer="device/new.html", permission="add")
def new(request):
"""new country """
form = Form(request, schema=DeviceForm)
if "form_submitted" in request.POST and form.validate():
dbsession = DBSession()
device = form.bind(Device())
# TODO: db error control?
dbsession.add(device)
request.session.flash("warning;Se agrego el dispositivo!")
return HTTPFound(location = request.route_url("device_list"))
return dict(form=FormRenderer(form),
action_url=request.route_url("device_new"))
@view_config(route_name="device_edit", renderer="device/edit.html", permission="add")
def edit(request):
"""device edit """
id = request.matchdict['id']
dbsession = DBSession()
device = dbsession.query(Device).filter_by(id=id).one()
if device is None:
request.session.flash("error;No se encontro el dispositivo!")
return HTTPFound(location=request.route_url("device_list"))
form = Form(request, schema=DeviceForm)
if "form_submitted" in request.POST and form.validate():
form.bind(device)
dbsession.add(device)
request.session.flash("warning;Se guardo el dispositivo!")
s = SuperControl()
s.restart('condor')
return HTTPFound(location = request.route_url("device_list"))
storage = request.registry.settings['storage']
sp = path.join(storage, str(device.id))
last = max(glob.iglob(path.join(sp, '*.png')), key=path.getctime)
sample = '/store/{0}/{1}'.format(device.id, path.split(last)[1])
action_url = request.route_url("device_edit", id=id)
if device.roi is None: device.roi = '400,400,650,650'
return dict(form=FormRenderer(form),
action_url=action_url, obj=device,sample = sample )
@view_config(route_name='device_view', renderer="device/mon.html")
def view(request):
device_id = int(request.matchdict.get('id', -1))
device = Device.findBy(device_id) if device_id>0 else Device.first()
if not device:
return HTTPNotFound()
return {'device':device}
@view_config(route_name="device_delete", permission="delete")
def delete(request):
"""device delete """
id = request.matchdict['id']
dbsession = DBSession()
device = dbsession.query(Device).filter_by(id=id).first()
if device is None:
request.session.flash("error;No se encontro el dispositivo!")
return HTTPFound(location=request.route_url("device_list"))
try:
transaction.begin()
dbsession.delete(device)
transaction.commit()
request.session.flash("warning;Se elimino el dispositivo!")
except IntegrityError:
# delete error
transaction.abort()
request.session.flash("error;No se pudo eliminar el dispositivo!")
return HTTPFound(location=request.route_url("device_list"))
|
mayblue9/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/perceptron.py
|
245
|
# Author: Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from ..feature_selection.from_model import _LearntSelectorMixin
class Perceptron(BaseSGDClassifier, _LearntSelectorMixin):
"""Perceptron
Read more in the :ref:`User Guide <perceptron>`.
Parameters
----------
penalty : None, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to None.
alpha : float
Constant that multiplies the regularization term if regularization is
used. Defaults to 0.0001
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, optional, default True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
eta0 : double
Constant by which the updates are multiplied. Defaults to 1.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
`Perceptron` and `SGDClassifier` share the same underlying implementation.
In fact, `Perceptron()` is equivalent to `SGDClassifier(loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)`.
See also
--------
SGDClassifier
References
----------
http://en.wikipedia.org/wiki/Perceptron and references therein.
"""
def __init__(self, penalty=None, alpha=0.0001, fit_intercept=True,
n_iter=5, shuffle=True, verbose=0, eta0=1.0, n_jobs=1,
random_state=0, class_weight=None, warm_start=False):
super(Perceptron, self).__init__(loss="perceptron",
penalty=penalty,
alpha=alpha, l1_ratio=0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
learning_rate="constant",
eta0=eta0,
power_t=0.5,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
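# Editor's illustrative sketch (not part of scikit-learn): the equivalence
# stated in the Notes section can be checked empirically; with a shared
# random_state the two estimators should learn identical weights.
if __name__ == "__main__":
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier

    X, y = make_classification(n_samples=200, n_features=20, random_state=0)
    p = Perceptron(random_state=0).fit(X, y)
    s = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
                      penalty=None, random_state=0).fit(X, y)
    print(np.allclose(p.coef_, s.coef_))  # expected: True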
|
SinnerSchraderMobileMirrors/django-cms
|
refs/heads/develop
|
cms/migrations/0016_author_copy.py
|
385
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']"})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': ['auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['auth.User']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.User']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['auth.User']"}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
nexiles/odoo
|
refs/heads/8.0
|
addons/base_setup/__openerp__.py
|
259
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Initial Setup Tools',
'version': '1.0',
'category': 'Hidden',
'description': """
This module helps to configure the system at the installation of a new database.
================================================================================
Shows you a list of application features to install from.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base', 'web_kanban'],
'data': [
'security/ir.model.access.csv',
'base_setup_views.xml',
'res_config_view.xml',
'res_partner_view.xml',
'views/base_setup.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
GdZ/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_4/tests/modeltests/order_with_respect_to/tests.py
|
150
|
from __future__ import absolute_import
from operator import attrgetter
from django.test import TestCase
from .models import Post, Question, Answer
class OrderWithRespectToTests(TestCase):
def test_basic(self):
q1 = Question.objects.create(text="Which Beatle starts with the letter 'R'?")
q2 = Question.objects.create(text="What is your name?")
Answer.objects.create(text="John", question=q1)
Answer.objects.create(text="Jonno", question=q2)
Answer.objects.create(text="Paul", question=q1)
Answer.objects.create(text="Paulo", question=q2)
Answer.objects.create(text="George", question=q1)
Answer.objects.create(text="Ringo", question=q1)
# The answers will always be ordered in the order they were inserted.
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Ringo",
],
attrgetter("text"),
)
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = Answer.objects.filter(question=q1)[0]
self.assertEqual(a1.text, "John")
a2 = a1.get_next_in_order()
self.assertEqual(a2.text, "Paul")
a4 = list(Answer.objects.filter(question=q1))[-1]
self.assertEqual(a4.text, "Ringo")
self.assertEqual(a4.get_previous_in_order().text, "George")
# Determining (and setting) the ordering for a particular item is also
# possible.
id_list = [o.pk for o in q1.answer_set.all()]
self.assertEqual(a2.question.get_answer_order(), id_list)
a5 = Answer.objects.create(text="Number five", question=q1)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
self.assertEqual(
a2.question.get_answer_order(), a5.question.get_answer_order()
)
# The ordering can be altered:
id_list = [o.pk for o in q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
self.assertNotEqual(a5.question.get_answer_order(), id_list)
a5.question.set_answer_order(id_list)
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Number five", "Ringo"
],
attrgetter("text")
)
def test_recursive_ordering(self):
p1 = Post.objects.create(title='1')
p2 = Post.objects.create(title='2')
p1_1 = Post.objects.create(title="1.1", parent=p1)
p1_2 = Post.objects.create(title="1.2", parent=p1)
p2_1 = Post.objects.create(title="2.1", parent=p2)
p1_3 = Post.objects.create(title="1.3", parent=p1)
self.assertEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
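# A minimal sketch of the models this test assumes (they live in ./models.py,
# which is not shown here); the field names come from the assertions above,
# while the max_length values are assumptions. The Meta option
# order_with_respect_to is what generates the get_next_in_order(),
# get_previous_in_order(), get_answer_order()/set_answer_order() and
# get_post_order() methods the tests exercise.
from django.db import models

class Question(models.Model):
    text = models.CharField(max_length=200)

class Answer(models.Model):
    text = models.CharField(max_length=200)
    question = models.ForeignKey(Question)

    class Meta:
        # Adds a hidden _order column scoped to the question, which drives
        # the insertion-order behaviour asserted in test_basic.
        order_with_respect_to = 'question'

class Post(models.Model):
    title = models.CharField(max_length=200)
    parent = models.ForeignKey('self', related_name='children', null=True)

    class Meta:
        # order_with_respect_to also accepts a recursive foreign key, which
        # is what test_recursive_ordering checks via p1.get_post_order().
        order_with_respect_to = 'parent'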
|
nemonik/CoCreateLite
|
refs/heads/master
|
ccl-cookbook/files/default/cocreatelite/cocreate/views/playgrounds.py
|
1
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from ..models import VMPlayground
from ..forms import VMPlaygroundForm, VMPlaygroundDescriptionForm, VMPlaygroundUserAccessForm, VMPlaygroundGroupAccessForm
from . import util
from ..util import single_user_mode
"""
View controllers for playground data
"""
@single_user_mode
def index(request):
"""
Show the list of playgrounds for this user.
"""
# determine all of the playgrounds this user has access to
groupids = [group.id for group in request.user.groups.all()]
print ("Group ids: " + str(groupids))
    playgrounds = (VMPlayground.objects.filter(creator = request.user)
                   | VMPlayground.objects.filter(access_users__id = request.user.id)
                   | VMPlayground.objects.filter(access_groups__id__in = groupids))
# determine all of the demo boxes from a set of playgrounds
demos = []
for playground in playgrounds:
demos = demos + playground.getDemos()
context = {
"playgrounds": playgrounds,
"demos": demos
}
return render(request, "playgrounds.html", util.fillContext(context, request))
@single_user_mode
def add(request):
"""
Add a new playground.
"""
if request.method == 'GET':
form = VMPlaygroundForm()
elif request.method == 'POST':
form = VMPlaygroundForm(request.POST)
if form.is_valid():
# hooray, let's create the playground
playground = VMPlayground.objects.create(
name = form.data['name'],
creator = request.user,
description = form.data['description'],
description_is_markdown = form.data.get('description_is_markdown', False),
environment = form.data['environment'],
)
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form}
return render(request, "addPlayground.html", util.fillContext(opts, request))
@single_user_mode
def remove(request, playground_id):
"""
Remove a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
for sandbox in playground.sandboxes.all():
        sandbox.delete()
playground.delete()
return HttpResponseRedirect(reverse("playgrounds"))
@single_user_mode
def playground(request, playground_id):
"""
Show the details for this playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
opts = {"playground": playground}
return render(request, "newPlaygroundDetails.html", util.fillContext(opts, request))
@single_user_mode
def alterUserAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundUserAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundUserAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundUserAccess.html", util.fillContext(opts, request))
@single_user_mode
def alterGroupAccess(request, playground_id):
"""
Alter the access control list for a playground.
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundGroupAccessForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundGroupAccessForm(request.POST, instance=playground)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "alterPlaygroundGroupAccess.html", util.fillContext(opts, request))
@single_user_mode
def editDesc(request, playground_id):
"""
Alter or edit the description of the playground
"""
playground = get_object_or_404(VMPlayground, pk = playground_id)
if request.method == 'GET':
form = VMPlaygroundDescriptionForm(instance = playground)
elif request.method == 'POST':
form = VMPlaygroundDescriptionForm(request.POST)
if form.is_valid():
            playground.description_is_markdown = form.data.get('description_is_markdown', False)
playground.description = form.data['description']
playground.save()
return HttpResponseRedirect(reverse("playground", args=[playground.id]))
else:
pass
opts = {"form": form, "playground": playground }
return render(request, "editPlaygroundDesc.html", util.fillContext(opts, request))
|
ingadhoc/odoo-saas-manager
|
refs/heads/master
|
addons/saas_manager_x/product_template.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Nautical
# Copyright (C) 2013 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class template(osv.osv):
""""""
_inherit = 'product.template'
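    # _inherit without a new _name extends the existing product.template
    # model in place, so the "type" column below overrides the base selection
    # and adds the extra ('saas', 'SaaS') option wherever product templates
    # are used.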
_columns = {
        'type': fields.selection(
            [('product', 'Stockable Product'), ('consu', 'Consumable'),
             ('service', 'Service'), ('saas', 'SaaS')],
            'Product Type', required=True,
            help="Consumable: Will not imply stock management for this product.\n"
                 "Stockable product: Will imply stock management for this product.\n"
                 "SaaS product: For SaaS Products."),
}
_defaults = {
}
_constraints = [
]
template()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iuliat/nova
|
refs/heads/master
|
nova/api/openstack/compute/plugins/v3/suspend_server.py
|
36
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = "os-suspend-server"
authorize = extensions.os_compute_authorizer(ALIAS)
class SuspendServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SuspendServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, action='suspend')
try:
server = common.get_instance(self.compute_api, context, id)
self.compute_api.suspend(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, action='resume')
try:
server = common.get_instance(self.compute_api, context, id)
self.compute_api.resume(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume', id)
class SuspendServer(extensions.V3APIExtensionBase):
"""Enable suspend/resume server actions."""
name = "SuspendServer"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = SuspendServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
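# A minimal client-side sketch of how these actions are driven (assumptions:
# the standard "POST /servers/<id>/action" endpoint and a valid auth token,
# neither of which is defined in this module). @wsgi.action dispatches on the
# single key of the JSON body, so the request body is simply {"suspend": null}
# or {"resume": null}.
import requests

def suspend_server(endpoint, token, server_id):
    resp = requests.post(
        "%s/servers/%s/action" % (endpoint, server_id),
        json={"suspend": None},
        headers={"X-Auth-Token": token},
    )
    # The controller above answers 202 on success, 404 for a missing server
    # and 409 when the instance is locked or in an invalid state.
    resp.raise_for_status()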
|