| repo_name | ref | path | copies | content |
|---|---|---|---|---|
PeteAndersen/swarfarm | refs/heads/master | bestiary/parse/util.py | 1 | import difflib
def update_bestiary_obj(model, com2us_id, defaults):
obj, created = model.objects.get_or_create(com2us_id=com2us_id, defaults=defaults)
if created:
print(f'!!! Created new {model.__name__} {com2us_id}')
else:
# Compare parsed values to existing object
updated = False
for field, parse_value in defaults.items():
current_value = getattr(obj, field)
if current_value != parse_value:
if isinstance(current_value, str):
# Display a diff
print(f'Updating {field} for {com2us_id}')
for line in difflib.ndiff([current_value], [parse_value]):
print(line)
else:
print(f'Updating {field} for {com2us_id} from `{current_value}` to `{parse_value}`.')
setattr(obj, field, parse_value)
updated = True
if updated:
obj.save()
return obj
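# Usage sketch (hypothetical model and values; any Django model with a
# `com2us_id` field works):
#   monster = update_bestiary_obj(Monster, 101, {'name': 'Fairy'})
# This creates the row when com2us_id=101 is new; otherwise it prints a
# per-field diff and saves only if at least one field actually changed.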
def show_diff(seqm):
"""Unify operations between two compared strings
seqm is a difflib.SequenceMatcher instance whose a & b are strings"""
output= []
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
if opcode == 'equal':
output.append(seqm.a[a0:a1])
elif opcode == 'insert':
output.append("{+" + seqm.b[b0:b1] + "+}")
elif opcode == 'delete':
output.append("{-" + seqm.a[a0:a1] + "-}")
elif opcode == 'replace':
output.append("<del>" + seqm.a[a0:a1] + "</del><ins>" + seqm.b[b0:b1] + "</ins>")
else:
raise RuntimeError("unexpected opcode")
return ''.join(output) |
jwren/intellij-community | refs/heads/master | python/testData/refactoring/extractmethod/DuplicateSingleLine.after.py | 79 | def bar():
a = foo()
print a
a = foo()
print a
def foo():
a = 1
return a
|
CeltonMcGrath/TACTIC | refs/heads/master | src/pyasm/web/html_wdg_test.py | 6 | #!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import unittest, string
from html_wdg import *
from web_state import *
class HtmlWdgTest(unittest.TestCase):
def test_element(my):
br = HtmlElement("br")
my.assertEquals("<br/>\n", br.get_display() )
def test_children(my):
href = HtmlElement.href("yahoo", "http://www.yahoo.com")
my.assertEquals("<a href=\"http://www.yahoo.com\">yahoo</a>\n", href.get_display() )
def test_style(my):
div = HtmlElement.div("Hello")
style = "background-color: #f0f0f0"
div.set_style(style)
my.assertEquals("<div style=\"%s\">Hello</div>\n" % style, div.get_display() )
def test_table(my):
table = Table()
table.add_row()
table.add_cell( "Name:")
table.add_cell( "Remko")
table.add_row()
table.add_cell( "Password:" )
table.add_cell( "pig")
html = Html()
html.writeln("<table cellpadding=\"0\" cellspacing=\"0\">")
html.writeln("<tr><td>Name:</td><td>Remko</td></tr>")
html.writeln("<tr><td>Password:</td><td>pig</td></tr>")
html.writeln("</table>")
a = html.getvalue()
a = string.replace(a, "\n", "")
b = table.get_display()
b = string.replace(b, "\n", "")
my.assertEquals( a, b )
if __name__ == '__main__':
unittest.main()
|
tracedeng/shuhe | refs/heads/master | region/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
chriskmanx/qmole | refs/heads/master | QMOLEDEV/node/tools/scons/scons-local-1.2.0/SCons/Tool/pdftex.py | 12 | """SCons.Tool.pdftex
Tool-specific initialization for pdftex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdftex.py 3842 2008/12/20 22:59:52 scons"
import SCons.Action
import SCons.Util
import SCons.Tool.tex
PDFTeXAction = None
# This action might be needed more than once if we are dealing with
# labels and bibtex.
PDFLaTeXAction = None
def PDFLaTeXAuxAction(target = None, source= None, env=None):
result = SCons.Tool.tex.InternalLaTeXAuxAction( PDFLaTeXAction, target, source, env )
return result
def PDFTeXLaTeXFunction(target = None, source= None, env=None):
"""A builder for TeX and LaTeX that scans the source file to
decide the "flavor" of the source and then executes the appropriate
program."""
if SCons.Tool.tex.is_LaTeX(source):
result = PDFLaTeXAuxAction(target,source,env)
else:
result = PDFTeXAction(target,source,env)
return result
PDFTeXLaTeXAction = None
def generate(env):
"""Add Builders and construction variables for pdftex to an Environment."""
global PDFTeXAction
if PDFTeXAction is None:
PDFTeXAction = SCons.Action.Action('$PDFTEXCOM', '$PDFTEXCOMSTR')
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action("$PDFLATEXCOM", "$PDFLATEXCOMSTR")
global PDFTeXLaTeXAction
if PDFTeXLaTeXAction is None:
PDFTeXLaTeXAction = SCons.Action.Action(PDFTeXLaTeXFunction,
strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.tex', PDFTeXLaTeXAction)
bld.add_emitter('.tex', SCons.Tool.tex.tex_pdf_emitter)
# Add the epstopdf builder after the pdftex builder
# so pdftex is the default for no source suffix
pdf.generate2(env)
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
env['PDFTEXCOM'] = 'cd ${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
# Duplicate from latex.py. If latex.py goes away, then this is still OK.
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 3
def exists(env):
return env.Detect('pdftex')
|
Arcensoth/cogbot | refs/heads/master | cogbot/cogs/robo_mod/conditions/author_has_been_member_for.py | 1 | from datetime import datetime, timedelta
from typing import Optional
from cogbot.cogs.robo_mod.robo_mod_condition import RoboModCondition
from cogbot.cogs.robo_mod.robo_mod_trigger import RoboModTrigger
class AuthorHasBeenMemberForCondition(RoboModCondition):
def __init__(self):
self.more_than: Optional[timedelta] = None
self.less_than: Optional[timedelta] = None
async def update(self, state: "RoboModServerState", data: dict):
# more_than
raw_more_than = data.get("more_than", None)
if raw_more_than is not None:
self.more_than = timedelta(**raw_more_than)
# less_than
raw_less_than = data.get("less_than", None)
if raw_less_than is not None:
self.less_than = timedelta(**raw_less_than)
async def check(self, trigger: RoboModTrigger) -> bool:
now = datetime.utcnow()
joined_at = trigger.author.joined_at
if joined_at is None:
return False
member_for = now - joined_at
is_more_than = (self.more_than is None) or (member_for > self.more_than)
is_less_than = (self.less_than is None) or (member_for < self.less_than)
return is_more_than and is_less_than
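# Hypothetical config sketch: the `more_than`/`less_than` dicts are unpacked
# straight into datetime.timedelta, so any timedelta keyword works, e.g.
#   {"more_than": {"days": 7}, "less_than": {"weeks": 4}}
# matches authors who joined more than 7 days but less than 4 weeks ago.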
|
wjfwzzc/Kaggle_Script | refs/heads/master | word2vec_nlp_tutorial/multinomial_naive_bayes.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import sklearn.naive_bayes
import data
import process.bag_of_words
import submissions
if __name__ == '__main__':
mnb = sklearn.naive_bayes.MultinomialNB()
mnb.fit(process.bag_of_words.train, data.target)
pred = mnb.predict(process.bag_of_words.test)
submissions.save_csv(pred, '{file_name}.csv'.format(file_name=__file__[:-3]))
|
shiora/The-Perfect-Pokemon-Team-Balancer | refs/heads/master | libs/env/Lib/site-packages/whoosh/qparser/dateparse.py | 95 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import re
import sys
from datetime import datetime, timedelta
from whoosh.compat import string_type, iteritems
from whoosh.qparser import plugins, syntax
from whoosh.qparser.taggers import Tagger
from whoosh.support.relativedelta import relativedelta
from whoosh.util.text import rcompile
from whoosh.util.times import adatetime, timespan
from whoosh.util.times import fill_in, is_void, relative_days
from whoosh.util.times import TimeError
class DateParseError(Exception):
"Represents an error in parsing date text."
# Utility functions
def print_debug(level, msg, *args):
if level > 0:
print((" " * (level - 1)) + (msg % args))
# Parser element objects
class Props(object):
"""A dumb little object that just puts copies a dictionary into attibutes
so I can use dot syntax instead of square bracket string item lookup and
save a little bit of typing. Used by :class:`Regex`.
"""
def __init__(self, **args):
self.__dict__ = args
def __repr__(self):
return repr(self.__dict__)
def get(self, key, default=None):
return self.__dict__.get(key, default)
class ParserBase(object):
"""Base class for date parser elements.
"""
def to_parser(self, e):
if isinstance(e, string_type):
return Regex(e)
else:
return e
def parse(self, text, dt, pos=0, debug=-9999):
raise NotImplementedError
def date_from(self, text, dt=None, pos=0, debug=-9999):
if dt is None:
dt = datetime.now()
d, pos = self.parse(text, dt, pos, debug + 1)
return d
class MultiBase(ParserBase):
"""Base class for date parser elements such as Sequence and Bag that
have sub-elements.
"""
def __init__(self, elements, name=None):
"""
:param elements: the sub-elements to match.
:param name: a name for this element (for debugging purposes only).
"""
self.elements = [self.to_parser(e) for e in elements]
self.name = name
def __repr__(self):
return "%s<%s>%r" % (self.__class__.__name__, self.name or '',
self.elements)
class Sequence(MultiBase):
"""Merges the dates parsed by a sequence of sub-elements.
"""
def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", name=None,
progressive=False):
"""
:param elements: the sequence of sub-elements to parse.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param name: a name for this element (for debugging purposes only).
:param progressive: if True, elements after the first do not need to
match. That is, for elements (a, b, c) and progressive=True, the
sequence matches like ``a[b[c]]``.
"""
super(Sequence, self).__init__(elements, name)
self.sep_pattern = sep
if sep:
self.sep_expr = rcompile(sep, re.IGNORECASE)
else:
self.sep_expr = None
self.progressive = progressive
def parse(self, text, dt, pos=0, debug=-9999):
d = adatetime()
first = True
foundall = False
failed = False
print_debug(debug, "Seq %s sep=%r text=%r", self.name,
self.sep_pattern, text[pos:])
for e in self.elements:
print_debug(debug, "Seq %s text=%r", self.name, text[pos:])
if self.sep_expr and not first:
print_debug(debug, "Seq %s looking for sep", self.name)
m = self.sep_expr.match(text, pos)
if m:
pos = m.end()
else:
print_debug(debug, "Seq %s didn't find sep", self.name)
break
print_debug(debug, "Seq %s trying=%r at=%s", self.name, e, pos)
try:
at, newpos = e.parse(text, dt, pos=pos, debug=debug + 1)
except TimeError:
failed = True
break
print_debug(debug, "Seq %s result=%r", self.name, at)
if not at:
break
pos = newpos
print_debug(debug, "Seq %s adding=%r to=%r", self.name, at, d)
try:
d = fill_in(d, at)
except TimeError:
print_debug(debug, "Seq %s Error in fill_in", self.name)
failed = True
break
print_debug(debug, "Seq %s filled date=%r", self.name, d)
first = False
else:
foundall = True
if not failed and (foundall or (not first and self.progressive)):
print_debug(debug, "Seq %s final=%r", self.name, d)
return (d, pos)
else:
print_debug(debug, "Seq %s failed", self.name)
return (None, None)
class Combo(Sequence):
"""Parses a sequence of elements in order and combines the dates parsed
by the sub-elements somehow. The default behavior is to accept two dates
from the sub-elements and turn them into a range.
"""
def __init__(self, elements, fn=None, sep="(\\s+|\\s*,\\s*)", min=2, max=2,
name=None):
"""
:param elements: the sequence of sub-elements to parse.
:param fn: a function to run on all dates found. It should return a
datetime, adatetime, or timespan object. If this argument is None,
the default behavior accepts two dates and returns a timespan.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param min: the minimum number of dates required from the sub-elements.
:param max: the maximum number of dates allowed from the sub-elements.
:param name: a name for this element (for debugging purposes only).
"""
super(Combo, self).__init__(elements, sep=sep, name=name)
self.fn = fn
self.min = min
self.max = max
def parse(self, text, dt, pos=0, debug=-9999):
dates = []
first = True
print_debug(debug, "Combo %s sep=%r text=%r", self.name,
self.sep_pattern, text[pos:])
for e in self.elements:
if self.sep_expr and not first:
print_debug(debug, "Combo %s looking for sep at %r",
self.name, text[pos:])
m = self.sep_expr.match(text, pos)
if m:
pos = m.end()
else:
print_debug(debug, "Combo %s didn't find sep", self.name)
return (None, None)
print_debug(debug, "Combo %s trying=%r", self.name, e)
try:
at, pos = e.parse(text, dt, pos, debug + 1)
except TimeError:
at, pos = None, None
print_debug(debug, "Combo %s result=%r", self.name, at)
if at is None:
return (None, None)
first = False
if is_void(at):
continue
if len(dates) == self.max:
print_debug(debug, "Combo %s length > %s", self.name, self.max)
return (None, None)
dates.append(at)
print_debug(debug, "Combo %s dates=%r", self.name, dates)
if len(dates) < self.min:
print_debug(debug, "Combo %s length < %s", self.name, self.min)
return (None, None)
return (self.dates_to_timespan(dates), pos)
def dates_to_timespan(self, dates):
if self.fn:
return self.fn(dates)
elif len(dates) == 2:
return timespan(dates[0], dates[1])
else:
raise DateParseError("Don't know what to do with %r" % (dates,))
class Choice(MultiBase):
"""Returns the date from the first of its sub-elements that matches.
"""
def parse(self, text, dt, pos=0, debug=-9999):
print_debug(debug, "Choice %s text=%r", self.name, text[pos:])
for e in self.elements:
print_debug(debug, "Choice %s trying=%r", self.name, e)
try:
d, newpos = e.parse(text, dt, pos, debug + 1)
except TimeError:
d, newpos = None, None
if d:
print_debug(debug, "Choice %s matched", self.name)
return (d, newpos)
print_debug(debug, "Choice %s no match", self.name)
return (None, None)
class Bag(MultiBase):
"""Parses its sub-elements in any order and merges the dates.
"""
def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", onceper=True,
requireall=False, allof=None, anyof=None, name=None):
"""
:param elements: the sub-elements to parse.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param onceper: only allow each element to match once.
:param requireall: if True, the sub-elements can match in any order,
but they must all match.
:param allof: a list of indexes into the list of elements. When this
argument is not None, this element matches only if all the
indicated sub-elements match.
:param anyof: a list of indexes into the list of elements. When this
argument is not None, this element matches only if any of the
indicated sub-elements match.
:param name: a name for this element (for debugging purposes only).
"""
super(Bag, self).__init__(elements, name)
self.sep_expr = rcompile(sep, re.IGNORECASE)
self.onceper = onceper
self.requireall = requireall
self.allof = allof
self.anyof = anyof
def parse(self, text, dt, pos=0, debug=-9999):
first = True
d = adatetime()
seen = [False] * len(self.elements)
while True:
newpos = pos
print_debug(debug, "Bag %s text=%r", self.name, text[pos:])
if not first:
print_debug(debug, "Bag %s looking for sep", self.name)
m = self.sep_expr.match(text, pos)
if m:
newpos = m.end()
else:
print_debug(debug, "Bag %s didn't find sep", self.name)
break
for i, e in enumerate(self.elements):
print_debug(debug, "Bag %s trying=%r", self.name, e)
try:
at, xpos = e.parse(text, dt, newpos, debug + 1)
except TimeError:
at, xpos = None, None
print_debug(debug, "Bag %s result=%r", self.name, at)
if at:
if self.onceper and seen[i]:
return (None, None)
d = fill_in(d, at)
newpos = xpos
seen[i] = True
break
else:
break
pos = newpos
if self.onceper and all(seen):
break
first = False
if (not any(seen)
or (self.allof and not all(seen[pos] for pos in self.allof))
or (self.anyof and not any(seen[pos] for pos in self.anyof))
or (self.requireall and not all(seen))):
return (None, None)
print_debug(debug, "Bag %s final=%r", self.name, d)
return (d, pos)
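# Bag is what lets sub-elements appear in any order: English.setup below
# builds Bag((time, dmy)), so illustrative inputs like "5pm tomorrow" and
# "tomorrow 5pm" both merge into the same partial datetime.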
class Optional(ParserBase):
"""Wraps a sub-element to indicate that the sub-element is optional.
"""
def __init__(self, element):
self.element = self.to_parser(element)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.element)
def parse(self, text, dt, pos=0, debug=-9999):
try:
d, pos = self.element.parse(text, dt, pos, debug + 1)
except TimeError:
d, pos = None, None
if d:
return (d, pos)
else:
return (adatetime(), pos)
class ToEnd(ParserBase):
"""Wraps a sub-element and requires that the end of the sub-element's match
be the end of the text.
"""
def __init__(self, element):
self.element = element
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.element)
def parse(self, text, dt, pos=0, debug=-9999):
try:
d, pos = self.element.parse(text, dt, pos, debug + 1)
except TimeError:
d, pos = None, None
if d and pos == len(text):
return (d, pos)
else:
return (None, None)
class Regex(ParserBase):
"""Matches a regular expression and maps named groups in the pattern to
datetime attributes using a function or overridden method.
There are two points at which you can customize the behavior of this class,
either by supplying functions to the initializer or overriding methods.
* The ``modify`` function or ``modify_props`` method takes a ``Props``
object containing the named groups and modifies its values (in place).
* The ``fn`` function or ``props_to_date`` method takes a ``Props`` object
and the base datetime and returns an adatetime/datetime.
"""
fn = None
modify = None
def __init__(self, pattern, fn=None, modify=None):
self.pattern = pattern
self.expr = rcompile(pattern, re.IGNORECASE)
self.fn = fn
self.modify = modify
def __repr__(self):
return "<%r>" % (self.pattern,)
def parse(self, text, dt, pos=0, debug=-9999):
m = self.expr.match(text, pos)
if not m:
return (None, None)
props = self.extract(m)
self.modify_props(props)
try:
d = self.props_to_date(props, dt)
except TimeError:
d = None
if d:
return (d, m.end())
else:
return (None, None)
def extract(self, match):
d = match.groupdict()
for key, value in iteritems(d):
try:
value = int(value)
d[key] = value
except (ValueError, TypeError):
pass
return Props(**d)
def modify_props(self, props):
if self.modify:
self.modify(props)
def props_to_date(self, props, dt):
if self.fn:
return self.fn(props, dt)
else:
args = {}
for key in adatetime.units:
args[key] = props.get(key)
return adatetime(**args)
class Month(Regex):
def __init__(self, *patterns):
self.patterns = patterns
self.exprs = [rcompile(pat, re.IGNORECASE) for pat in self.patterns]
self.pattern = ("(?P<month>"
+ "|".join("(%s)" % pat for pat in self.patterns)
+ ")")
self.expr = rcompile(self.pattern, re.IGNORECASE)
def modify_props(self, p):
text = p.month
for i, expr in enumerate(self.exprs):
m = expr.match(text)
if m:
p.month = i + 1
break
class PlusMinus(Regex):
def __init__(self, years, months, weeks, days, hours, minutes, seconds):
rel_years = "((?P<years>[0-9]+) *(%s))?" % years
rel_months = "((?P<months>[0-9]+) *(%s))?" % months
rel_weeks = "((?P<weeks>[0-9]+) *(%s))?" % weeks
rel_days = "((?P<days>[0-9]+) *(%s))?" % days
rel_hours = "((?P<hours>[0-9]+) *(%s))?" % hours
rel_mins = "((?P<mins>[0-9]+) *(%s))?" % minutes
rel_secs = "((?P<secs>[0-9]+) *(%s))?" % seconds
self.pattern = ("(?P<dir>[+-]) *%s *%s *%s *%s *%s *%s *%s(?=(\\W|$))"
% (rel_years, rel_months, rel_weeks, rel_days,
rel_hours, rel_mins, rel_secs))
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
if p.dir == "-":
dir = -1
else:
dir = 1
delta = relativedelta(years=(p.get("years") or 0) * dir,
months=(p.get("months") or 0) * dir,
weeks=(p.get("weeks") or 0) * dir,
days=(p.get("days") or 0) * dir,
hours=(p.get("hours") or 0) * dir,
minutes=(p.get("mins") or 0) * dir,
seconds=(p.get("secs") or 0) * dir)
return dt + delta
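# Illustrative behavior (assuming the English unit patterns defined below):
# "+1 month 2 days" yields dt + relativedelta(months=1, days=2), while
# "-3 weeks" subtracts three weeks via the negative multiplier.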
class Daynames(Regex):
def __init__(self, next, last, daynames):
self.next_pattern = next
self.last_pattern = last
self._dayname_exprs = tuple(rcompile(pat, re.IGNORECASE)
for pat in daynames)
dn_pattern = "|".join(daynames)
self.pattern = ("(?P<dir>%s|%s) +(?P<day>%s)(?=(\\W|$))"
% (next, last, dn_pattern))
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
if re.match(self.last_pattern, p.dir):
dir = -1
else:
dir = 1
for daynum, expr in enumerate(self._dayname_exprs):
m = expr.match(p.day)
if m:
break
current_daynum = dt.weekday()
days_delta = relative_days(current_daynum, daynum, dir)
d = dt.date() + timedelta(days=days_delta)
return adatetime(year=d.year, month=d.month, day=d.day)
class Time12(Regex):
def __init__(self):
self.pattern = ("(?P<hour>[1-9]|10|11|12)(:(?P<mins>[0-5][0-9])"
"(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?)?"
"\\s*(?P<ampm>am|pm)(?=(\\W|$))")
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
isam = p.ampm.lower().startswith("a")
if p.hour == 12:
if isam:
hr = 0
else:
hr = 12
else:
hr = p.hour
if not isam:
hr += 12
return adatetime(hour=hr, minute=p.mins, second=p.secs, microsecond=p.usecs)
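# The 12-hour conversion above follows the usual convention:
#   "12:30 am" -> hour 0, "12:01 pm" -> hour 12,
#   "1 pm" -> hour 13, "11:59 pm" -> hour 23.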
# Top-level parser classes
class DateParser(object):
"""Base class for locale-specific parser classes.
"""
day = Regex("(?P<day>([123][0-9])|[1-9])(?=(\\W|$))(?!=:)",
lambda p, dt: adatetime(day=p.day))
year = Regex("(?P<year>[0-9]{4})(?=(\\W|$))",
lambda p, dt: adatetime(year=p.year))
time24 = Regex("(?P<hour>([0-1][0-9])|(2[0-3])):(?P<mins>[0-5][0-9])"
"(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?"
"(?=(\\W|$))",
lambda p, dt: adatetime(hour=p.hour, minute=p.mins,
second=p.secs, microsecond=p.usecs))
time12 = Time12()
def __init__(self):
simple_year = "(?P<year>[0-9]{4})"
simple_month = "(?P<month>[0-1][0-9])"
simple_day = "(?P<day>[0-3][0-9])"
simple_hour = "(?P<hour>([0-1][0-9])|(2[0-3]))"
simple_minute = "(?P<minute>[0-5][0-9])"
simple_second = "(?P<second>[0-5][0-9])"
simple_usec = "(?P<microsecond>[0-9]{6})"
tup = (simple_year, simple_month, simple_day, simple_hour,
simple_minute, simple_second, simple_usec)
simple_seq = Sequence(tup, sep="[- .:/]*", name="simple",
progressive=True)
self.simple = Sequence((simple_seq, "(?=(\\s|$))"), sep='')
self.setup()
def setup(self):
raise NotImplementedError
#
def get_parser(self):
return self.all
def parse(self, text, dt, pos=0, debug=-9999):
parser = self.get_parser()
d, newpos = parser.parse(text, dt, pos=pos, debug=debug)
if isinstance(d, (adatetime, timespan)):
d = d.disambiguated(dt)
return (d, newpos)
def date_from(self, text, basedate=None, pos=0, debug=-9999, toend=True):
if basedate is None:
basedate = datetime.utcnow()
parser = self.get_parser()
if toend:
parser = ToEnd(parser)
d = parser.date_from(text, basedate, pos=pos, debug=debug)
if isinstance(d, (adatetime, timespan)):
d = d.disambiguated(basedate)
return d
class English(DateParser):
day = Regex("(?P<day>([123][0-9])|[1-9])(st|nd|rd|th)?(?=(\\W|$))",
lambda p, dt: adatetime(day=p.day))
def setup(self):
self.plusdate = PlusMinus("years|year|yrs|yr|ys|y",
"months|month|mons|mon|mos|mo",
"weeks|week|wks|wk|ws|w",
"days|day|dys|dy|ds|d",
"hours|hour|hrs|hr|hs|h",
"minutes|minute|mins|min|ms|m",
"seconds|second|secs|sec|s")
self.dayname = Daynames("next", "last",
("monday|mon|mo", "tuesday|tues|tue|tu",
"wednesday|wed|we", "thursday|thur|thu|th",
"friday|fri|fr", "saturday|sat|sa",
"sunday|sun|su"))
midnight_l = lambda p, dt: adatetime(hour=0, minute=0, second=0,
microsecond=0)
midnight = Regex("midnight", midnight_l)
noon_l = lambda p, dt: adatetime(hour=12, minute=0, second=0,
microsecond=0)
noon = Regex("noon", noon_l)
now = Regex("now", lambda p, dt: dt)
self.time = Choice((self.time12, self.time24, midnight, noon, now),
name="time")
def tomorrow_to_date(p, dt):
d = dt.date() + timedelta(days=+1)
return adatetime(year=d.year, month=d.month, day=d.day)
tomorrow = Regex("tomorrow", tomorrow_to_date)
def yesterday_to_date(p, dt):
d = dt.date() + timedelta(days=-1)
return adatetime(year=d.year, month=d.month, day=d.day)
yesterday = Regex("yesterday", yesterday_to_date)
thisyear = Regex("this year", lambda p, dt: adatetime(year=dt.year))
thismonth = Regex("this month",
lambda p, dt: adatetime(year=dt.year,
month=dt.month))
today = Regex("today",
lambda p, dt: adatetime(year=dt.year, month=dt.month,
day=dt.day))
self.month = Month("january|jan", "february|febuary|feb", "march|mar",
"april|apr", "may", "june|jun", "july|jul",
"august|aug", "september|sept|sep", "october|oct",
"november|nov", "december|dec")
# If you specify a day number you must also specify a month... this
# Choice captures that constraint
self.dmy = Choice((Sequence((self.day, self.month, self.year),
name="dmy"),
Sequence((self.month, self.day, self.year),
name="mdy"),
Sequence((self.year, self.month, self.day),
name="ymd"),
Sequence((self.year, self.day, self.month),
name="ydm"),
Sequence((self.day, self.month), name="dm"),
Sequence((self.month, self.day), name="md"),
Sequence((self.month, self.year), name="my"),
self.month, self.year, self.dayname, tomorrow,
yesterday, thisyear, thismonth, today, now,
), name="date")
self.datetime = Bag((self.time, self.dmy), name="datetime")
self.bundle = Choice((self.plusdate, self.datetime, self.simple),
name="bundle")
self.torange = Combo((self.bundle, "to", self.bundle), name="torange")
self.all = Choice((self.torange, self.bundle), name="all")
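# Illustrative inputs this grammar accepts (not exhaustive): "25 dec 2010",
# "next tuesday", "5:10pm", "+2 weeks", "now", and ranges such as
# "yesterday to today", which `torange` turns into a timespan via Combo.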
# QueryParser plugin
class DateParserPlugin(plugins.Plugin):
"""Adds more powerful parsing of DATETIME fields.
>>> parser.add_plugin(DateParserPlugin())
>>> parser.parse(u"date:'last tuesday'")
"""
def __init__(self, basedate=None, dateparser=None, callback=None,
free=False, free_expr="([A-Za-z][A-Za-z_0-9]*):([^^]+)"):
"""
:param basedate: a datetime object representing the current time
against which to measure relative dates. If you do not supply this
argument, the plugin uses ``datetime.utcnow()``.
:param dateparser: an instance of
:class:`whoosh.qparser.dateparse.DateParser`. If you do not supply
this argument, the plugin automatically uses
:class:`whoosh.qparser.dateparse.English`.
:param callback: a callback function for parsing errors. This allows
you to provide feedback to the user about problems parsing dates.
:param free: if True, this plugin will install a filter early in the
parsing process and try to find undelimited dates such as
``date:last tuesday``. Note that allowing this could result in
normal query words accidentally being parsed as dates sometimes.
"""
self.basedate = basedate
if dateparser is None:
dateparser = English()
self.dateparser = dateparser
self.callback = callback
self.free = free
self.freeexpr = free_expr
def taggers(self, parser):
if self.free:
# If we're tokenizing, we have to go before the FieldsPlugin
return [(DateTagger(self, self.freeexpr), -1)]
else:
return ()
def filters(self, parser):
# Run the filter after the FieldsPlugin assigns field names
return [(self.do_dates, 110)]
def errorize(self, message, node):
if self.callback:
self.callback(message)
return syntax.ErrorNode(message, node)
def text_to_dt(self, node):
text = node.text
try:
dt = self.dateparser.date_from(text, self.basedate)
if dt is None:
return self.errorize(text, node)
else:
n = DateTimeNode(node.fieldname, dt, node.boost)
except DateParseError:
e = sys.exc_info()[1]
n = self.errorize(e, node)
n.startchar = node.startchar
n.endchar = node.endchar
return n
def range_to_dt(self, node):
start = end = None
dp = self.dateparser.get_parser()
if node.start:
start = dp.date_from(node.start, self.basedate)
if start is None:
return self.errorize(node.start, node)
if node.end:
end = dp.date_from(node.end, self.basedate)
if end is None:
return self.errorize(node.end, node)
if start and end:
ts = timespan(start, end).disambiguated(self.basedate)
start, end = ts.start, ts.end
elif start:
start = start.disambiguated(self.basedate)
if isinstance(start, timespan):
start = start.start
elif end:
end = end.disambiguated(self.basedate)
if isinstance(end, timespan):
end = end.end
drn = DateRangeNode(node.fieldname, start, end, boost=node.boost)
drn.startchar = node.startchar
drn.endchar = node.endchar
return drn
def do_dates(self, parser, group):
schema = parser.schema
if not schema:
return group
from whoosh.fields import DATETIME
datefields = frozenset(fieldname for fieldname, field
in parser.schema.items()
if isinstance(field, DATETIME))
for i, node in enumerate(group):
if node.has_fieldname:
fname = node.fieldname or parser.fieldname
else:
fname = None
if isinstance(node, syntax.GroupNode):
group[i] = self.do_dates(parser, node)
elif fname in datefields:
if node.has_text:
group[i] = self.text_to_dt(node)
elif isinstance(node, syntax.RangeNode):
group[i] = self.range_to_dt(node)
return group
class DateTimeNode(syntax.SyntaxNode):
has_fieldname = True
has_boost = True
def __init__(self, fieldname, dt, boost=1.0):
self.fieldname = fieldname
self.dt = dt
self.boost = boost
def r(self):
return repr(self.dt)
def query(self, parser):
from whoosh import query
fieldname = self.fieldname or parser.fieldname
field = parser.schema[fieldname]
dt = self.dt
if isinstance(self.dt, datetime):
btext = field.to_bytes(dt)
return query.Term(fieldname, btext, boost=self.boost)
elif isinstance(self.dt, timespan):
return query.DateRange(fieldname, dt.start, dt.end,
boost=self.boost)
else:
raise Exception("Unknown time object: %r" % dt)
class DateRangeNode(syntax.SyntaxNode):
has_fieldname = True
has_boost = True
def __init__(self, fieldname, start, end, boost=1.0):
self.fieldname = fieldname
self.start = start
self.end = end
self.boost = boost
def r(self):
return "%r-%r" % (self.start, self.end)
def query(self, parser):
from whoosh import query
fieldname = self.fieldname or parser.fieldname
return query.DateRange(fieldname, self.start, self.end,
boost=self.boost)
class DateTagger(Tagger):
def __init__(self, plugin, expr):
self.plugin = plugin
self.expr = rcompile(expr, re.IGNORECASE)
def match(self, parser, text, pos):
from whoosh.fields import DATETIME
match = self.expr.match(text, pos)
if match:
fieldname = match.group(1)
dtext = match.group(2)
if parser.schema and fieldname in parser.schema:
field = parser.schema[fieldname]
if isinstance(field, DATETIME):
plugin = self.plugin
dateparser = plugin.dateparser
basedate = plugin.basedate
d, newpos = dateparser.parse(dtext, basedate)
if d:
node = DateTimeNode(fieldname, d)
node.startchar = match.start()
node.endchar = newpos + match.start(2)
return node
|
wilkerwma/codeschool | refs/heads/master | vendor/github.com/fabiommendes/django-viewpack/src/viewpack/packs/crud.py | 2 | from django import forms
from django.core.exceptions import ImproperlyConfigured
from viewpack.packs import (
ViewPack, SingleObjectPackMixin, TemplateResponsePackMixin
)
from viewpack.views import (
View, DetailView, CreateView, ListView, DeleteView, UpdateView, TemplateView
)
from viewpack.views.mixins import (
VerboseNamesContextMixin, DetailObjectContextMixin, TemplateResponseMixin,
HasUploadMixin, SingleObjectMixin,
)
from viewpack.utils import lazy, delegate_to_parent
class CRUDViewPack(SingleObjectPackMixin,
TemplateResponsePackMixin,
ViewPack):
"""
A view group that defines a CRUD interface to a model.
It handles the following urls::
/ --> list view
new/ --> creates a new object
<pk>/ --> detail view
<pk>/edit/ --> edit object
<pk>/delete/ --> delete object
Each one of these entry points is controlled by a specific View inner class:
* :class:`viewpack.CRUDViewPack.ListView`: index listings
* :class:`viewpack.CRUDViewPack.CreateView`: create new objects
* :class:`viewpack.CRUDViewPack.DetailView`: show an object's details
* :class:`viewpack.CRUDViewPack.UpdateView`: edit an object
* :class:`viewpack.CRUDViewPack.DeleteView`: delete an object
It is possible to disable any view by setting the corresponding attribute to
None in a subclass. One can completely replace these view classes with their
own views or, more conveniently, implement mixin classes that are
automatically used during class creation::
class MyCRUD(CRUDViewPack):
model = MyModel
# Disable list views
ListView = None
# Mixin class that is mixed with the default CreateView class
class CreateViewMixin:
pattern = r'^create/$'
"""
CRUD_VIEWS = {'create', 'detail', 'update', 'delete', 'list'}
#: List of fields that should be excluded from the model forms automatically
#: generated in the child views. Can be used as an alternative to the
#: `fields` attribute in order to create a blacklist.
exclude_fields = delegate_to_parent('exclude_fields', None)
@lazy
def fields(self):
"""Define a list of fields that are used to automatically create forms
in the update and create views."""
if self.exclude_fields is None:
return forms.fields_for_model(self.model)
else:
exclude = self.exclude_fields
return forms.fields_for_model(self.model, exclude=exclude)
#: If True, the generic crud templates are not included in the list of
#: template for child views.
disable_crud_templates = delegate_to_parent('disable_crud_templates', False)
#: If True, it will use the functions in
#: :mod:`viewpack.permissions` to check if the user has permission to view,
#: edit or create new objects.
check_permissions = delegate_to_parent('check_permissions', False)
def get_template_names(self, view_name):
assert isinstance(view_name, str), 'invalid view name: %r' % view_name
try:
names = super().get_template_names(view_name)
except ImproperlyConfigured:
if ((not self.disable_crud_templates) or
(view_name not in self.CRUD_VIEWS)):
raise
names = []
# We add the default views to the search list of valid views
if not self.disable_crud_templates and view_name in self.CRUD_VIEWS:
names.append(
'viewpack/crud/%s%s' % (
view_name, self.template_extension_normalized
))
names.append(
'viewpack/crud/%s-base%s' % (
view_name, self.template_extension_normalized
))
return names
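# Example of the resulting fallback chain, assuming the default ".html"
# extension: for view_name == "list" this appends "viewpack/crud/list.html"
# and then "viewpack/crud/list-base.html" after any parent-supplied names.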
class CreateView(VerboseNamesContextMixin,
HasUploadMixin,
CreateView):
"""Create new objects."""
pattern = r'^new/$'
class DetailView(DetailObjectContextMixin,
VerboseNamesContextMixin,
DetailView):
"""Detail view for object."""
pattern = r'(?P<pk>\d+)/$'
class UpdateView(DetailObjectContextMixin,
VerboseNamesContextMixin,
UpdateView):
"""Edit object."""
pattern = r'^(?P<pk>\d+)/edit/$'
success_url = '../'
class DeleteView(DetailObjectContextMixin,
VerboseNamesContextMixin,
DeleteView):
"""Delete object."""
pattern = r'^(?P<pk>\d+)/delete/$'
class ListView(VerboseNamesContextMixin, ListView):
"""List instances of the given model."""
pattern = r'^$' |
tonk/ansible | refs/heads/devel | test/support/integration/plugins/modules/aws_az_info.py | 11 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: aws_az_info
short_description: Gather information about availability zones in AWS.
description:
- Gather information about availability zones in AWS.
- This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
version_added: '2.5'
author: 'Henrique Rodrigues (@Sodki)'
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
type: dict
extends_documentation_fragment:
- aws
- ec2
requirements: [botocore, boto3]
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all availability zones
- aws_az_info:
# Gather information about a single availability zone
- aws_az_info:
filters:
zone-name: eu-west-1a
'''
RETURN = '''
availability_zones:
returned: on success
description: >
Availability zones that match the provided filters. Each element consists of a dict with all the information
related to that availability zone.
type: list
sample: "[
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1b'
},
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1c'
}
]"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
def main():
argument_spec = dict(
filters=dict(default={}, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if module._name == 'aws_az_facts':
module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", version='2.14')
connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
# Replace filter key underscores with dashes, for compatibility
sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
try:
availability_zones = connection.describe_availability_zones(
Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Unable to describe availability zones.")
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
module.exit_json(availability_zones=snaked_availability_zones)
if __name__ == '__main__':
main()
|
home-assistant/home-assistant | refs/heads/dev | homeassistant/components/evohome/water_heater.py | 2 | """Support for WaterHeater devices of (EMEA/EU) Honeywell TCC systems."""
from __future__ import annotations
import logging
from homeassistant.components.water_heater import (
SUPPORT_AWAY_MODE,
SUPPORT_OPERATION_MODE,
WaterHeaterEntity,
)
from homeassistant.const import PRECISION_TENTHS, PRECISION_WHOLE, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as dt_util
from . import EvoChild
from .const import DOMAIN, EVO_FOLLOW, EVO_PERMOVER
_LOGGER = logging.getLogger(__name__)
STATE_AUTO = "auto"
HA_STATE_TO_EVO = {STATE_AUTO: "", STATE_ON: "On", STATE_OFF: "Off"}
EVO_STATE_TO_HA = {v: k for k, v in HA_STATE_TO_EVO.items() if v != ""}
STATE_ATTRS_DHW = ["dhwId", "activeFaults", "stateStatus", "temperatureStatus"]
async def async_setup_platform(
hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Create a DHW controller."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
_LOGGER.debug(
"Adding: DhwController (%s), id=%s",
broker.tcs.hotwater.zone_type,
broker.tcs.hotwater.zoneId,
)
new_entity = EvoDHW(broker, broker.tcs.hotwater)
async_add_entities([new_entity], update_before_add=True)
class EvoDHW(EvoChild, WaterHeaterEntity):
"""Base for a Honeywell TCC DHW controller (aka boiler)."""
def __init__(self, evo_broker, evo_device) -> None:
"""Initialize an evohome DHW controller."""
super().__init__(evo_broker, evo_device)
self._unique_id = evo_device.dhwId
self._name = "DHW controller"
self._icon = "mdi:thermometer-lines"
self._precision = PRECISION_TENTHS if evo_broker.client_v1 else PRECISION_WHOLE
self._supported_features = SUPPORT_AWAY_MODE | SUPPORT_OPERATION_MODE
@property
def state(self):
"""Return the current state."""
return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]]
@property
def current_operation(self) -> str:
"""Return the current operating mode (Auto, On, or Off)."""
if self._evo_device.stateStatus["mode"] == EVO_FOLLOW:
return STATE_AUTO
return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]]
@property
def operation_list(self) -> list[str]:
"""Return the list of available operations."""
return list(HA_STATE_TO_EVO)
@property
def is_away_mode_on(self):
"""Return True if away mode is on."""
is_off = EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]] == STATE_OFF
is_permanent = self._evo_device.stateStatus["mode"] == EVO_PERMOVER
return is_off and is_permanent
async def async_set_operation_mode(self, operation_mode: str) -> None:
"""Set new operation mode for a DHW controller.
Except for Auto, the new mode applies only until the next scheduled setpoint.
"""
if operation_mode == STATE_AUTO:
await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto())
else:
await self._update_schedule()
until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", ""))
until = dt_util.as_utc(until) if until else None
if operation_mode == STATE_ON:
await self._evo_broker.call_client_api(
self._evo_device.set_dhw_on(until=until)
)
else: # STATE_OFF
await self._evo_broker.call_client_api(
self._evo_device.set_dhw_off(until=until)
)
async def async_turn_away_mode_on(self):
"""Turn away mode on."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_off())
async def async_turn_away_mode_off(self):
"""Turn away mode off."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto())
async def async_turn_on(self):
"""Turn on."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_on())
async def async_turn_off(self):
"""Turn off."""
await self._evo_broker.call_client_api(self._evo_device.set_dhw_off())
async def async_update(self) -> None:
"""Get the latest state data for a DHW controller."""
await super().async_update()
for attr in STATE_ATTRS_DHW:
self._device_state_attrs[attr] = getattr(self._evo_device, attr)
|
vks/servo | refs/heads/master | tests/wpt/harness/wptrunner/metadata.py | 34 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import tempfile
import types
import uuid
from collections import defaultdict
from mozlog.structured import reader
from mozlog.structured import structuredlog
import expected
import manifestupdate
import testloader
import wptmanifest
import wpttest
from vcs import git
manifest = None # Module that will be imported relative to test_root
logger = structuredlog.StructuredLogger("web-platform-tests")
def load_test_manifests(serve_root, test_paths):
do_delayed_imports(serve_root)
manifest_loader = testloader.ManifestLoader(test_paths, False)
return manifest_loader.load()
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run"""
manifests = load_test_manifests(serve_root, test_paths)
change_data = {}
if sync_root is not None:
if rev_old is not None:
rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
if rev_old is not None:
change_data = load_change_data(rev_old, rev_new, repo=sync_root)
expected_map_by_manifest = update_from_logs(manifests,
*log_file_names,
ignore_existing=ignore_existing)
for test_manifest, expected_map in expected_map_by_manifest.iteritems():
url_base = manifests[test_manifest]["url_base"]
metadata_path = test_paths[url_base]["metadata_path"]
write_changes(metadata_path, expected_map)
results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
return unexpected_changes(manifests, change_data, results_changed)
def do_delayed_imports(serve_root):
global manifest
from manifest import manifest
def files_in_repo(repo_root):
return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
def rev_range(rev_old, rev_new, symmetric=False):
joiner = ".." if not symmetric else "..."
return "".join([rev_old, joiner, rev_new])
def paths_changed(rev_old, rev_new, repo):
data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
for line in data.split("\n") if line.strip()]
output = set(lines)
return output
def load_change_data(rev_old, rev_new, repo):
changes = paths_changed(rev_old, rev_new, repo)
rv = {}
status_keys = {"M": "modified",
"A": "new",
"D": "deleted"}
# TODO: deal with renames
for item in changes:
rv[item[1]] = status_keys[item[0]]
return rv
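# Resulting shape (hypothetical paths), e.g. after one edit and one addition:
#   {"dom/test_a.html": "modified", "dom/test_b.html": "new"}
# Keys are repo-relative paths as reported by `git diff --name-status`.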
def unexpected_changes(manifests, change_data, files_changed):
files_changed = set(files_changed)
root_manifest = None
for manifest, paths in manifests.iteritems():
if paths["url_base"] == "/":
root_manifest = manifest
break
else:
return []
return [fn for fn, tests in root_manifest if fn in files_changed and change_data.get(fn) != "modified"]
# For each testrun
# Load all files and scan for the suite_start entry
# Build a hash of filename: properties
# For each different set of properties, gather all chunks
# For each chunk in the set of chunks, go through all tests
# for each test, make a map of {conditionals: [(platform, new_value)]}
# Repeat for each platform
# For each test in the list of tests:
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
def update_from_logs(manifests, *log_filenames, **kwargs):
ignore_existing = kwargs.pop("ignore_existing", False)
expected_map = {}
id_test_map = {}
for test_manifest, paths in manifests.iteritems():
expected_map_manifest, id_path_map_manifest = create_test_tree(paths["metadata_path"],
test_manifest)
expected_map[test_manifest] = expected_map_manifest
id_test_map.update(id_path_map_manifest)
updater = ExpectedUpdater(manifests, expected_map, id_test_map,
ignore_existing=ignore_existing)
for log_filename in log_filenames:
with open(log_filename) as f:
updater.update_from_log(f)
for manifest_expected in expected_map.itervalues():
for tree in manifest_expected.itervalues():
for test in tree.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_expected()
test.coalesce_expected()
return expected_map
def directory_manifests(metadata_path):
rv = []
for dirpath, dirnames, filenames in os.walk(metadata_path):
if "__dir__.ini" in filenames:
rel_path = os.path.relpath(dirpath, metadata_path)
rv.append(os.path.join(rel_path, "__dir__.ini"))
return rv
def write_changes(metadata_path, expected_map):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected_map)
# Keep all __dir__.ini files (these are not in expected_map because they
# aren't associated with a specific test)
keep_files = directory_manifests(metadata_path)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files.extend(item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item)))
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected_map):
# Serialize the data back to a file
for tree in expected_map.itervalues():
if not tree.is_empty:
manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
assert manifest_str != ""
path = expected.expected_path(metadata_path, tree.test_path)
dir = os.path.split(path)[0]
if not os.path.exists(dir):
os.makedirs(dir)
with open(path, "w") as f:
f.write(manifest_str)
class ExpectedUpdater(object):
def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
self.test_manifests = test_manifests
self.expected_tree = expected_tree
self.id_path_map = id_path_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end}
self.tests_visited = {}
self.test_cache = {}
def update_from_log(self, log_file):
self.run_info = None
log_reader = reader.read(log_file)
reader.each_log(log_reader, self.action_map)
def suite_start(self, data):
self.run_info = data["run_info"]
def test_id(self, id):
if type(id) in types.StringTypes:
return id
else:
return tuple(id)
def test_start(self, data):
test_id = self.test_id(data["test"])
try:
test_manifest, test = self.id_path_map[test_id]
expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
except KeyError:
print "Test not found %s, skipping" % test_id
return
self.test_cache[test_id] = expected_node
if test_id not in self.tests_visited:
if self.ignore_existing:
expected_node.clear_expected()
self.tests_visited[test_id] = set()
def test_status(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
subtest = test.get_subtest(data["subtest"])
self.tests_visited[test.id].add(data["subtest"])
result = test_cls.subtest_result_cls(
data["subtest"],
data["status"],
data.get("message"))
subtest.set_result(self.run_info, result)
def test_end(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
if data["status"] == "SKIP":
return
result = test_cls.result_cls(
data["status"],
data.get("message"))
test.set_result(self.run_info, result)
del self.test_cache[test_id]
def create_test_tree(metadata_path, test_manifest):
expected_map = {}
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual"])
include_types = set(manifest.item_types) - exclude_types
for test_path, tests in test_manifest.itertypes(*include_types):
expected_data = load_expected(test_manifest, metadata_path, test_path, tests)
if expected_data is None:
expected_data = create_expected(test_manifest, test_path, tests)
for test in tests:
id_test_map[test.id] = (test_manifest, test)
expected_map[test] = expected_data
return expected_map, id_test_map
def create_expected(test_manifest, test_path, tests):
expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base)
for test in tests:
expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected
def load_expected(test_manifest, metadata_path, test_path, tests):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
test_manifest.url_base)
if expected_manifest is None:
return
tests_by_id = {item.id: item for item in tests}
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
if test.id not in tests_by_id:
test.remove()
# Add tests that don't have expected data
for test in tests:
if not expected_manifest.has_test(test.id):
expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected_manifest
|
DataDog/integrations-core | refs/heads/master | crio/tests/test_crio.py | 1 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import mock
import pytest
from datadog_checks.crio import CrioCheck
instance = {'prometheus_url': 'http://localhost:10249/metrics'}
CHECK_NAME = 'crio'
NAMESPACE = 'crio'
@pytest.fixture()
def mock_data():
f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'metrics.txt')
with open(f_name, 'r') as f:
text_data = f.read()
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"}
),
):
yield
def test_crio(aggregator, mock_data):
"""
Testing crio.
"""
c = CrioCheck(CHECK_NAME, {}, [instance])
c.check(instance)
aggregator.assert_metric(NAMESPACE + '.operations.count')
aggregator.assert_metric(NAMESPACE + '.operations.latency.count')
aggregator.assert_metric(NAMESPACE + '.operations.latency.sum')
aggregator.assert_metric(NAMESPACE + '.operations.latency.quantile')
aggregator.assert_metric(NAMESPACE + '.cpu.time')
aggregator.assert_metric(NAMESPACE + '.mem.resident')
aggregator.assert_metric(NAMESPACE + '.mem.virtual')
aggregator.assert_all_metrics_covered()
|
tara-sova/qreal | refs/heads/master | plugins/tools/visualInterpreter/examples/robotsCodeGeneration/reactionsStorage/WaitForColorIntensityBlockGenerator.py | 12 | # Application condition
waitFor.id == max_used_id and not cur_node_is_processed
# Reaction
port = "NXT_PORT_S" + waitFor.Port
condition = convertCondition(waitFor.Sign) + " " + waitFor.Intensity
wait_for_color_intensity_code = "while (!(ecrobot_get_nxtcolorsensor_light(" + port + ") " + condition + ")) {}\n"
wait_init_code = "ecrobot_init_nxtcolorsensor(" + port + ", NXT_COLORSENSOR));\n"
wait_terminate_code = "ecrobot_term_nxtcolorsensor(" + port + ");\n"
if wait_init_code not in init_code:
init_code.append(wait_init_code)
terminate_code.append(wait_terminate_code)
code.append([wait_for_color_intensity_code])
id_to_pos_in_code[waitFor.id] = len(code) - 1
cur_node_is_processed = True
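# Example of the generated NXT OSEK C, assuming Port="1", Intensity="50" and
# convertCondition mapping the Sign to ">":
#   while (!(ecrobot_get_nxtcolorsensor_light(NXT_PORT_S1) > 50)) {}
# plus one init/term pair per port, emitted only on first use.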
|
jagguli/intellij-community | refs/heads/master | python/testData/inspections/AddCallSuperCommentAfterColonPreserved.py | 74 | class Example1:
def __init__(self):
self.field1 = 1
class Example2(Example1):
def <warning descr="Call to __init__ of super class is missed">__init<caret>__</warning>(self): # Some valuable comment here
pass |
anirudhSK/chromium | refs/heads/master | tools/telemetry/telemetry/core/platform/posix_platform_backend.py | 2 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.spawn
import logging
import os
import re
import stat
import subprocess
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import ps_util
class PosixPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
# This is an abstract class. It is OK to have abstract methods.
# pylint: disable=W0223
def _RunCommand(self, args):
return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
def _GetFileContents(self, path):
with open(path, 'r') as f:
return f.read()
def _GetPsOutput(self, columns, pid=None):
"""Returns output of the 'ps' command as a list of lines.
Subclasses should override this function.
Args:
columns: A list of required columns, e.g., ['pid', 'pss'].
pid: If not None, returns only the information of the process
with that pid.
"""
args = ['ps']
args.extend(['-p', str(pid)] if pid is not None else ['-e'])
for c in columns:
args.extend(['-o', c + '='])
return self._RunCommand(args).splitlines()
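# For example, self._GetPsOutput(['pid', 'ppid'], pid=42) builds and runs
# ['ps', '-p', '42', '-o', 'pid=', '-o', 'ppid='], i.e. `ps -p 42 -o pid= -o ppid=`.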
def GetChildPids(self, pid):
"""Returns a list of child pids of |pid|."""
ps_output = self._GetPsOutput(['pid', 'ppid', 'state'])
ps_line_re = re.compile(
r'\s*(?P<pid>\d+)\s*(?P<ppid>\d+)\s*(?P<state>\S*)\s*')
processes = []
for pid_ppid_state in ps_output:
m = ps_line_re.match(pid_ppid_state)
assert m, 'Did not understand ps output: %s' % pid_ppid_state
processes.append((m.group('pid'), m.group('ppid'), m.group('state')))
return ps_util.GetChildPids(processes, pid)
def GetCommandLine(self, pid):
command = self._GetPsOutput(['command'], pid)
return command[0] if command else None
def GetFlushUtilityName(self):
return 'clear_system_cache'
def CanLaunchApplication(self, application):
return bool(distutils.spawn.find_executable(application))
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
assert application, 'Must specify application to launch'
if os.path.sep not in application:
application = distutils.spawn.find_executable(application)
assert application, 'Failed to find application in path'
args = [application]
if parameters:
assert isinstance(parameters, list), 'parameters must be a list'
args += parameters
def IsSetUID(path):
return (os.stat(path).st_mode & stat.S_ISUID) == stat.S_ISUID
def IsElevated():
p = subprocess.Popen(
['sudo', '-nv'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = p.communicate()[0]
# Some versions of sudo set the returncode based on whether sudo requires
# a password currently. Other versions return output when password is
# required and no output when the user is already authenticated.
return not p.returncode and not stdout
if elevate_privilege and not IsSetUID(application):
args = ['sudo'] + args
if not IsElevated():
print ('Telemetry needs to run %s under sudo. Please authenticate.' %
application)
subprocess.check_call(['sudo', '-v']) # Synchronously authenticate.
prompt = ('Would you like to always allow %s to be run as the current '
'user without sudo? If so, Telemetry will '
'`sudo chmod +s %s`. (y/N)' % (application, application))
if raw_input(prompt).lower() == 'y':
subprocess.check_call(['sudo', 'chmod', '+s', application])
stderror_destination = subprocess.PIPE
if logging.getLogger().isEnabledFor(logging.DEBUG):
stderror_destination = None
return subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=stderror_destination)
|
luuloe/python-duco | refs/heads/master | duco/modbus.py | 1 | """Support for Modbus."""
import logging
import struct
import threading
from duco.const import (
PROJECT_PACKAGE_NAME,
DUCO_MODBUS_BAUD_RATE,
DUCO_MODBUS_BYTE_SIZE,
DUCO_MODBUS_STOP_BITS,
DUCO_MODBUS_PARITY,
DUCO_MODBUS_METHOD
)
from duco.helpers import (twos_comp)
_LOGGER = logging.getLogger(PROJECT_PACKAGE_NAME)
# Type of network
CONF_MASTER_UNIT_ID = 'master_unit_id'
CONF_METHOD = 'method'
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_BAUDRATE = 'baudrate'
CONF_BYTESIZE = 'bytesize'
CONF_STOPBITS = 'stopbits'
CONF_TYPE = 'type'
CONF_PARITY = 'parity'
CONF_TIMEOUT = 'timeout'
REGISTER_TYPE_HOLDING = 'holding'
REGISTER_TYPE_INPUT = 'input'
DATA_TYPE_INT = 'int'
DATA_TYPE_FLOAT = 'float'
def create_client_config(modbus_client_type, modbus_client_port,
modbus_client_host=None, modbus_master_unit_id=0):
"""Create config dictionary."""
config = {CONF_TYPE: str(modbus_client_type),
CONF_PORT: str(modbus_client_port),
CONF_MASTER_UNIT_ID: int(modbus_master_unit_id),
CONF_TIMEOUT: int(3)}
# type specific part
if modbus_client_type == 'serial':
config[CONF_METHOD] = DUCO_MODBUS_METHOD
config[CONF_BAUDRATE] = DUCO_MODBUS_BAUD_RATE
config[CONF_BYTESIZE] = DUCO_MODBUS_BYTE_SIZE
config[CONF_STOPBITS] = DUCO_MODBUS_STOP_BITS
config[CONF_PARITY] = DUCO_MODBUS_PARITY
elif modbus_client_type == 'tcp':
config[CONF_HOST] = str(modbus_client_host)
else:
raise ValueError("modbus_client_type must be serial or tcp")
return config
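# Illustrative usage (the device path, host and port below are assumptions,
# not values mandated by this module):
#
#   serial_cfg = create_client_config('serial', '/dev/ttyUSB0')
#   tcp_cfg = create_client_config('tcp', 502, modbus_client_host='192.168.1.10')
#
# Either dictionary can be passed directly to ModbusHub below.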
class ModbusHub:
"""Thread safe wrapper class for pymodbus."""
def __init__(self, client_config):
"""Initialize the modbus hub."""
# generic configuration
self._client = None
self._kwargs = {'unit': client_config[CONF_MASTER_UNIT_ID]}
self._lock = threading.Lock()
self._config_type = client_config[CONF_TYPE]
self._config_port = client_config[CONF_PORT]
self._config_timeout = client_config[CONF_TIMEOUT]
self._config_delay = 0
if self._config_type == "serial":
# serial configuration
self._config_method = client_config[CONF_METHOD]
self._config_baudrate = client_config[CONF_BAUDRATE]
self._config_stopbits = client_config[CONF_STOPBITS]
self._config_bytesize = client_config[CONF_BYTESIZE]
self._config_parity = client_config[CONF_PARITY]
else:
# network configuration
self._config_host = client_config[CONF_HOST]
def setup(self):
"""Set up pymodbus client."""
if self._config_type == "serial":
from pymodbus.client.sync import ModbusSerialClient
self._client = ModbusSerialClient(
method=self._config_method,
port=self._config_port,
baudrate=self._config_baudrate,
stopbits=self._config_stopbits,
bytesize=self._config_bytesize,
parity=self._config_parity,
timeout=self._config_timeout,
retry_on_empty=True,
)
elif self._config_type == "rtuovertcp":
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.transaction import ModbusRtuFramer
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
framer=ModbusRtuFramer,
timeout=self._config_timeout,
)
elif self._config_type == "tcp":
from pymodbus.client.sync import ModbusTcpClient
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
elif self._config_type == "udp":
from pymodbus.client.sync import ModbusUdpClient
self._client = ModbusUdpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
else:
raise ValueError(("Unsupported config_type, must be serial, " +
"tcp, udp, rtuovertcp"))
# Connect device
self.connect()
def close(self):
"""Disconnect client."""
with self._lock:
self._client.close()
def connect(self):
"""Connect client."""
with self._lock:
self._client.connect()
def read_coils(self, address, count=1):
"""Read coils."""
with self._lock:
return self._client.read_coils(
address,
count,
**self._kwargs)
def read_input_registers(self, address, count=1):
"""Read input registers."""
with self._lock:
return self._client.read_input_registers(
address,
count,
**self._kwargs)
def read_holding_registers(self, address, count=1):
"""Read holding registers."""
with self._lock:
return self._client.read_holding_registers(
address,
count,
**self._kwargs)
def write_coil(self, address, value):
"""Write coil."""
with self._lock:
self._client.write_coil(
address,
value,
**self._kwargs)
def write_register(self, address, value):
"""Write register."""
with self._lock:
self._client.write_register(
address,
value,
**self._kwargs)
def write_registers(self, address, values):
"""Write registers."""
with self._lock:
self._client.write_registers(
address,
values,
**self._kwargs)
class ModbusRegister:
"""Modbus register."""
def __init__(self, hub, name, register, register_type,
unit_of_measurement, count, scale, offset, data_type,
precision):
"""Initialize the modbus register."""
self._hub = hub
self._name = name
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._scale = scale
self._offset = offset
self._precision = precision
self._data_type = data_type
self._value = None
def __str__(self):
"""Return the string representation of the register."""
return (self._name + ": " + str(self.value) + " " +
self._unit_of_measurement)
@property
def value(self):
"""Return the value of the register."""
self.update()
return self._value
@value.setter
def value(self, new_value):
"""Set the value of the node to new_value."""
if self._register_type != REGISTER_TYPE_HOLDING:
raise TypeError("Register must be of type HOLDING")
self._hub.write_register(self._register, new_value)
@property
def state(self):
"""Return the state of the register."""
return {'name': self._name,
'value': str(self.value),
'unit': self._unit_of_measurement}
@property
def name(self):
"""Return the name of the register."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Update the value of the register from the external hub."""
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._register,
self._count)
else:
result = self._hub.read_holding_registers(
self._register,
self._count)
val = 0
try:
registers = result.registers
except AttributeError:
_LOGGER.error("No response from modbus register %s",
self._register)
return
if self._data_type == DATA_TYPE_FLOAT:
byte_string = b''.join(
[x.to_bytes(2, byteorder='big') for x in registers]
)
val = struct.unpack(">f", byte_string)[0]
elif self._data_type == DATA_TYPE_INT:
for res in registers:
val += twos_comp(res, 16)
self._value = format(
self._scale * val + self._offset, '.{}f'.format(self._precision))
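# Minimal usage sketch (register address, scale and host are assumptions for
# illustration only):
#
#   hub = ModbusHub(create_client_config('tcp', 502, modbus_client_host='duco.local'))
#   hub.setup()
#   temp = ModbusRegister(hub, 'temperature', register=2,
#                         register_type=REGISTER_TYPE_INPUT,
#                         unit_of_measurement='C', count=1, scale=0.1,
#                         offset=0, data_type=DATA_TYPE_INT, precision=1)
#   print(temp.value)
#   hub.close()
#
# For DATA_TYPE_FLOAT the registers are concatenated big-endian before
# struct.unpack; e.g. the register pair [0x41C8, 0x0000] decodes to 25.0.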
|
odoomrp/odoomrp-wip | refs/heads/8.0 | quality_control_sale_stock/__init__.py | 87 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
|
tongwang01/tensorflow | refs/heads/master | tensorflow/python/platform/control_imports.py | 68 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switch between Google or open source dependencies."""
# Switch between Google and OSS dependencies
USE_OSS = True
# Per-dependency switches determining whether each dependency is ready
# to be replaced by its OSS equivalence.
# TODO(danmane,mrry,opensource): Flip these switches, then remove them
OSS_APP = True
OSS_FLAGS = True
OSS_GFILE = True
OSS_GOOGLETEST = True
OSS_LOGGING = True
OSS_PARAMETERIZED = True
|
ConnorGBrewster/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/tests/get_named_cookie/get.py | 6 | from datetime import datetime, timedelta
from tests.support.asserts import assert_success
from tests.support.fixtures import clear_all_cookies
from tests.support.inline import inline
def get_named_cookie(session, name):
return session.transport.send(
"GET", "session/{session_id}/cookie/{name}".format(
session_id=session.session_id,
name=name))
def test_get_named_session_cookie(session, url):
session.url = url("/common/blank.html")
clear_all_cookies(session)
session.execute_script("document.cookie = 'foo=bar'")
result = get_named_cookie(session, "foo")
cookie = assert_success(result)
assert isinstance(cookie, dict)
# table for cookie conversion
# https://w3c.github.io/webdriver/webdriver-spec.html#dfn-table-for-cookie-conversion
assert "name" in cookie
assert isinstance(cookie["name"], basestring)
assert "value" in cookie
assert isinstance(cookie["value"], basestring)
assert "path" in cookie
assert isinstance(cookie["path"], basestring)
assert "domain" in cookie
assert isinstance(cookie["domain"], basestring)
assert "secure" in cookie
assert isinstance(cookie["secure"], bool)
assert "httpOnly" in cookie
assert isinstance(cookie["httpOnly"], bool)
if "expiry" in cookie:
assert cookie.get("expiry") is None
assert cookie["name"] == "foo"
assert cookie["value"] == "bar"
def test_get_named_cookie(session, url):
session.url = url("/common/blank.html")
clear_all_cookies(session)
# same formatting as Date.toUTCString() in javascript
utc_string_format = "%a, %d %b %Y %H:%M:%S"
a_year_from_now = (datetime.utcnow() + timedelta(days=365)).strftime(utc_string_format)
session.execute_script("document.cookie = 'foo=bar;expires=%s'" % a_year_from_now)
result = get_named_cookie(session, "foo")
cookie = assert_success(result)
assert isinstance(cookie, dict)
assert "name" in cookie
assert isinstance(cookie["name"], basestring)
assert "value" in cookie
assert isinstance(cookie["value"], basestring)
assert "expiry" in cookie
assert isinstance(cookie["expiry"], (int, long))
assert cookie["name"] == "foo"
assert cookie["value"] == "bar"
# convert from seconds since epoch
assert datetime.utcfromtimestamp(
cookie["expiry"]).strftime(utc_string_format) == a_year_from_now
def test_duplicated_cookie(session, url, server_config):
new_cookie = {
"name": "hello",
"value": "world",
"domain": server_config["browser_host"],
"path": "/",
"http_only": False,
"secure": False
}
session.url = url("/common/blank.html")
clear_all_cookies(session)
session.set_cookie(**new_cookie)
session.url = inline("""
<script>
document.cookie = '{name}=newworld; domain={domain}; path=/';
</script>""".format(
name=new_cookie["name"],
domain=server_config["browser_host"]))
result = get_named_cookie(session, new_cookie["name"])
cookie = assert_success(result)
assert isinstance(cookie, dict)
assert "name" in cookie
assert isinstance(cookie["name"], basestring)
assert "value" in cookie
assert isinstance(cookie["value"], basestring)
assert cookie["name"] == new_cookie["name"]
assert cookie["value"] == "newworld"
|
Kast0rTr0y/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py | 13 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: netapp_e_snapshot_volume
short_description: Manage E/EF-Series snapshot volumes.
description:
- Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
notes:
- Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, an C(ok) status will be returned; no other changes can be made to a pre-existing snapshot volume.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- storage array ID
required: True
snapshot_image_id:
required: True
description:
- The identifier of the snapshot image used to create the new snapshot volume.
- "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
full_threshold:
description:
- The repository utilization warning threshold percentage
default: 85
name:
required: True
description:
- The name you wish to give the snapshot volume
view_mode:
required: True
description:
- The snapshot volume access mode
choices:
- modeUnknown
- readWrite
- readOnly
- __UNDEFINED
repo_percentage:
description:
- The size of the view in relation to the size of the base volume
default: 20
storage_pool_name:
description:
- Name of the storage pool on which to allocate the repository volume.
required: True
state:
description:
- Whether to create or remove the snapshot volume
required: True
choices:
- absent
- present
"""
EXAMPLES = """
- name: Snapshot volume
netapp_e_snapshot_volume:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}/"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
state: present
storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
snapshot_image_id: "{{ snapshot_volume_image_id }}"
name: "{{ snapshot_volume_name }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume that was created.
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
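# Illustrative call (the URL and credentials are placeholders, not part of
# the module's API):
#   rc, data = request('https://array.example.com/devmgr/v2/storage-systems',
#                      headers=HEADERS, url_username='admin',
#                      url_password='secret', validate_certs=False)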
class SnapshotVolume(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=True),
snapshot_image_id=dict(type='str', required=True),
full_threshold=dict(type='int', default=85),
name=dict(type='str', required=True),
view_mode=dict(type='str', default='readOnly',
choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
repo_percentage=dict(type='int', default=20),
storage_pool_name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present'])
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.state = args['state']
self.ssid = args['ssid']
self.snapshot_image_id = args['snapshot_image_id']
self.full_threshold = args['full_threshold']
self.name = args['name']
self.view_mode = args['view_mode']
self.repo_percentage = args['repo_percentage']
self.storage_pool_name = args['storage_pool_name']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
if not self.url.endswith('/'):
self.url += '/'
@property
def pool_id(self):
pools = 'storage-systems/%s/storage-pools' % self.ssid
url = self.url + pools
(rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
validate_certs=self.certs)
for pool in data:
if pool['name'] == self.storage_pool_name:
self.pool_data = pool
return pool['id']
self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
@property
def ss_vol_exists(self):
rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
if ss_vols:
for ss_vol in ss_vols:
if ss_vol['name'] == self.name:
self.ss_vol = ss_vol
return True
else:
return False
return False
@property
def ss_vol_needs_update(self):
if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
return True
else:
return False
def create_ss_vol(self):
post_data = dict(
snapshotImageId=self.snapshot_image_id,
fullThreshold=self.full_threshold,
name=self.name,
viewMode=self.view_mode,
repositoryPercentage=self.repo_percentage,
repositoryPoolId=self.pool_id
)
rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
url_password=self.pwd, validate_certs=self.certs, method='POST')
self.ss_vol = create_resp
# Doing a check after creation because the creation call fails to set the specified warning threshold
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=True, **create_resp)
def update_ss_vol(self):
post_data = dict(
fullThreshold=self.full_threshold,
)
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
method='POST', validate_certs=self.certs)
self.module.exit_json(changed=True, **resp)
def remove_ss_vol(self):
rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
method='DELETE')
self.module.exit_json(changed=True, msg="Volume successfully deleted")
def apply(self):
if self.state == 'present':
if self.ss_vol_exists:
if self.ss_vol_needs_update:
self.update_ss_vol()
else:
self.module.exit_json(changed=False, **self.ss_vol)
else:
self.create_ss_vol()
else:
if self.ss_vol_exists:
self.remove_ss_vol()
else:
self.module.exit_json(changed=False, msg="Volume already absent")
def main():
sv = SnapshotVolume()
sv.apply()
if __name__ == '__main__':
main()
|
shahbazn/neutron | refs/heads/master | neutron/extensions/dhcpagentscheduler.py | 29 | # Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.extensions import agent
from neutron import manager
from neutron import policy
from neutron import wsgi
DHCP_NET = 'dhcp-network'
DHCP_NETS = DHCP_NET + 's'
DHCP_AGENT = 'dhcp-agent'
DHCP_AGENTS = DHCP_AGENT + 's'
class NetworkSchedulerController(wsgi.Controller):
def index(self, request, **kwargs):
plugin = manager.NeutronManager.get_plugin()
policy.enforce(request.context,
"get_%s" % DHCP_NETS,
{})
return plugin.list_networks_on_dhcp_agent(
request.context, kwargs['agent_id'])
def create(self, request, body, **kwargs):
plugin = manager.NeutronManager.get_plugin()
policy.enforce(request.context,
"create_%s" % DHCP_NET,
{})
agent_id = kwargs['agent_id']
network_id = body['network_id']
result = plugin.add_network_to_dhcp_agent(request.context, agent_id,
network_id)
notify(request.context, 'dhcp_agent.network.add', network_id, agent_id)
return result
def delete(self, request, id, **kwargs):
plugin = manager.NeutronManager.get_plugin()
policy.enforce(request.context,
"delete_%s" % DHCP_NET,
{})
agent_id = kwargs['agent_id']
result = plugin.remove_network_from_dhcp_agent(request.context,
agent_id, id)
notify(request.context, 'dhcp_agent.network.remove', id, agent_id)
return result
class DhcpAgentsHostingNetworkController(wsgi.Controller):
def index(self, request, **kwargs):
plugin = manager.NeutronManager.get_plugin()
policy.enforce(request.context,
"get_%s" % DHCP_AGENTS,
{})
return plugin.list_dhcp_agents_hosting_network(
request.context, kwargs['network_id'])
class Dhcpagentscheduler(extensions.ExtensionDescriptor):
"""Extension class supporting dhcp agent scheduler.
"""
@classmethod
def get_name(cls):
return "DHCP Agent Scheduler"
@classmethod
def get_alias(cls):
return constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS
@classmethod
def get_description(cls):
return "Schedule networks among dhcp agents"
@classmethod
def get_updated(cls):
return "2013-02-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(NetworkSchedulerController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
DHCP_NETS, controller, parent))
parent = dict(member_name="network",
collection_name="networks")
controller = resource.Resource(DhcpAgentsHostingNetworkController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(
DHCP_AGENTS, controller, parent))
return exts
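# The two ResourceExtensions above map to the agent-scheduler endpoints,
# e.g. GET /v2.0/agents/{agent_id}/dhcp-networks and
# GET /v2.0/networks/{network_id}/dhcp-agents (paths shown for illustration).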
def get_extended_resources(self, version):
return {}
class InvalidDHCPAgent(agent.AgentNotFound):
message = _("Agent %(id)s is not a valid DHCP Agent or has been disabled")
class NetworkHostedByDHCPAgent(exceptions.Conflict):
message = _("The network %(network_id)s has been already hosted"
" by the DHCP Agent %(agent_id)s.")
class NetworkNotHostedByDhcpAgent(exceptions.Conflict):
message = _("The network %(network_id)s is not hosted"
" by the DHCP agent %(agent_id)s.")
class DhcpAgentSchedulerPluginBase(object):
"""REST API to operate the DHCP agent scheduler.
All methods must be called in an admin context.
"""
@abc.abstractmethod
def add_network_to_dhcp_agent(self, context, id, network_id):
pass
@abc.abstractmethod
def remove_network_from_dhcp_agent(self, context, id, network_id):
pass
@abc.abstractmethod
def list_networks_on_dhcp_agent(self, context, id):
pass
@abc.abstractmethod
def list_dhcp_agents_hosting_network(self, context, network_id):
pass
def notify(context, action, network_id, agent_id):
info = {'id': agent_id, 'network_id': network_id}
notifier = n_rpc.get_notifier('network')
notifier.info(context, action, {'agent': info})
|
Shrhawk/edx-platform | refs/heads/master | common/djangoapps/student/migrations/0025_auto__add_field_courseenrollmentallowed_auto_enroll.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseEnrollmentAllowed.auto_enroll'
db.add_column('student_courseenrollmentallowed', 'auto_enroll',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseEnrollmentAllowed.auto_enroll'
db.delete_column('student_courseenrollmentallowed', 'auto_enroll')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.testcenterregistration': {
'Meta': {'object_name': 'TestCenterRegistration'},
'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'accommodation_request': ('django.db.models.fields.CharField', [], {'db_index': 'False', 'max_length': '1024', 'blank': 'True'}),
'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.testcenteruser': {
'Meta': {'object_name': 'TestCenterUser'},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
RAFTDevTeam/raft | refs/heads/master | thirdparty/pyamf/pyamf/adapters/util.py | 11 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Useful helpers for adapters.
@since: 0.4
"""
import builtins
if not hasattr(builtins, 'set'):
from sets import Set as set
def to_list(obj, encoder):
"""
Converts an arbitrary object C{obj} to a C{list}.
"""
return list(obj)
def to_dict(obj, encoder):
"""
Converts an arbitrary object C{obj} to a C{dict}.
"""
return dict(obj)
def to_set(obj, encoder):
"""
Converts an arbitrary object C{obj} to a C{set}.
"""
return set(obj)
def to_tuple(x, encoder):
"""
Converts an arbitrary object C{obj} to a C{tuple}.
"""
return tuple(x)
def to_string(x, encoder):
"""
Converts an arbitrary object C{obj} to a string.
@since: 0.5
"""
return str(x)
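# Illustrative registration (an assumed usage pattern, not part of this
# module): adapters typically pass these helpers to pyamf.add_type so that
# otherwise unencodable types are converted before encoding, e.g.:
#
#   import collections
#   import pyamf
#   from pyamf.adapters import util
#
#   pyamf.add_type(collections.deque, util.to_list)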
|
Jaccorot/django-cms | refs/heads/develop | cms/south_migrations/0071_mptt_to_mp.py | 51 | # -*- coding: utf-8 -*-
from django.db.models import F
from django.db import transaction
from django.utils.translation import ugettext as _
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from treebeard.numconv import NumConv
STEPLEN = 4
ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class MP_AddHandler(object):
def __init__(self):
self.stmts = []
NUM = NumConv(len(ALPHABET), ALPHABET)
def _int2str(num):
return NUM.int2str(num)
def _str2int(num):
return NUM.str2int(num)
def _get_basepath(path, depth):
""":returns: The base path of another path up to a given depth"""
if path:
return path[0:depth * STEPLEN]
return ''
def _get_path(path, depth, newstep):
"""
Builds a path given some values
:param path: the base path
:param depth: the depth of the node
:param newstep: the value (integer) of the new step
"""
parentpath = _get_basepath(path, depth - 1)
key = _int2str(newstep)
return '{0}{1}{2}'.format(
parentpath,
ALPHABET[0] * (STEPLEN - len(key)),
key
)
def _inc_path(obj):
""":returns: The path of the next sibling of a given node path."""
newpos = _str2int(obj.path[-STEPLEN:]) + 1
key = _int2str(newpos)
if len(key) > STEPLEN:
raise Exception(_("Path Overflow from: '%s'" % (obj.path, )))
return '{0}{1}{2}'.format(
obj.path[:-STEPLEN],
ALPHABET[0] * (STEPLEN - len(key)),
key
)
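# Worked example of the materialized-path encoding (STEPLEN=4, base-36
# alphabet): the first root gets _get_path(None, 1, 1) == '0001'; its first
# child gets _get_path('0001', 2, 1) == '00010001'; and for a node whose
# path is '0001', _inc_path yields the next sibling path '0002'.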
class MP_AddRootHandler(MP_AddHandler):
def __init__(self, **kwargs):
super(MP_AddRootHandler, self).__init__()
self.kwargs = kwargs
def process(self):
# do we have a root node already?
last_root = self.kwargs['last_root']
if last_root:
# adding the new root node as the last one
newpath = _inc_path(last_root)
else:
# adding the first root node
newpath = _get_path(None, 1, 1)
newobj = self.kwargs['instance']
newobj.depth = 1
newobj.path = newpath
# saving the instance before returning it
newobj.save()
return newobj
class MP_AddChildHandler(MP_AddHandler):
def __init__(self, node, model, **kwargs):
super(MP_AddChildHandler, self).__init__()
self.node = node
self.node_cls = node.__class__
self.kwargs = kwargs
self.model = model
def process(self):
newobj = self.kwargs['instance']
newobj.depth = self.node.depth + 1
if self.node.numchild == 0:
# the node had no children, adding the first child
newobj.path = _get_path(
self.node.path, newobj.depth, 1)
max_length = self.node_cls._meta.get_field('path').max_length
if len(newobj.path) > max_length:
raise Exception(
_('The new node is too deep in the tree, try'
' increasing the path.max_length property'
' and UPDATE your database'))
else:
# adding the new child as the last one
newobj.path = _inc_path(self.node.last_child)
# saving the instance before returning it
newobj.save()
newobj._cached_parent_obj = self.node
self.model.objects.filter(
path=self.node.path).update(numchild=F('numchild')+1)
# we increase the numchild value of the object in memory
self.node.numchild += 1
return newobj
class Migration(DataMigration):
def forwards(self, orm):
pages = orm['cms.Page'].objects.all().order_by('tree_id', 'level', 'lft')
cache = {}
last_root = None
for page in pages:
if not page.parent_id:
handler = MP_AddRootHandler(instance=page, last_root=last_root)
handler.process()
last_root = page
page.last_child = None
else:
parent = cache[page.parent_id]
handler = MP_AddChildHandler(parent, orm['cms.Page'], instance=page)
handler.process()
parent.last_child = page
cache[page.pk] = page
plugins = orm['cms.CMSPlugin'].objects.all().order_by('tree_id', 'level', 'lft')
cache = {}
last_root = None
for plugin in plugins:
if not plugin.parent_id:
handler = MP_AddRootHandler(instance=plugin, last_root=last_root)
handler.process()
last_root = plugin
plugin.last_child = None
else:
parent = cache[plugin.parent_id]
handler = MP_AddChildHandler(parent, orm['cms.CMSPlugin'], instance=plugin)
handler.process()
parent.last_child = plugin
cache[plugin.pk] = plugin
def backwards(self, orm):
print("***********************************")
print("***********************************")
print("***********************************")
print(" ATTENTION")
print(" =========")
print("")
print("Your tree is now in an nonfunctional")
print("state. Please install an old version")
print("of django CMS (3.0.5) and run:")
print(" python manage.py cms fix-mptt")
print()
print()
print("************************************")
print("************************************")
print("************************************")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
symmetrical = True
|
kparal/anaconda | refs/heads/master | tests/gui/inside/welcome.py | 13 | # Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Chris Lumens <clumens@redhat.com>
from . import UITestCase
# This test case handles the basic case on the welcome language spoke where
# everything works as intended. On this spoke, we are testing the following:
#
# * The default language and locale are selected and displayed at the top
# of their views.
# * Clicking the Quit button brings up a dialog asking if you're sure, though
# we're not going to test that confirming actually quits.
# * Clicking the Continue button brings up the betanag dialog, though we're
# not going to test the quit button there either.
#
# TODO:
# * Entering text into the search box should result in narrowing down the
# contents of the left hand view.
class BasicWelcomeTestCase(UITestCase):
def check_lang_locale_views(self, spoke):
# FIXME: This encodes default information.
lang = "English"
locale = "English (United States)"
view = self.find("Languages", node=spoke)
self.assertIsNotNone(view, "Language view not found")
enabled = self.selected_view_children(view)
# We get back a list of [native name, english name, language setting] for each actual language.
self.assertEqual(len(enabled), 3, msg="An unexpected number of languages are selected")
self.assertEqual(enabled[0].text, lang)
view = self.find("Locales", node=spoke)
self.assertIsNotNone(view, "Locale view not found")
enabled = self.selected_view_children(view)
self.assertEqual(len(enabled), 1, msg="An unexpected number of locales are selected")
self.assertEqual(enabled[0].text, locale)
def check_quit_button(self, spoke):
self.click_button("_Quit", node=spoke)
dlg = self.check_dialog_displayed("Quit")
self.click_button("No", node=dlg)
def check_continue_button(self, spoke):
self.click_button("_Continue", node=spoke)
dlg = self.check_dialog_displayed("Beta Warn")
self.click_button("I accept my fate.", dlg)
def _run(self):
# Before doing anything, verify we are on the right screen.
w = self.check_window_displayed("WELCOME")
# And now we can check everything else on the screen.
self.check_help_button(w)
self.check_keyboard_layout_indicator("us", node=w)
self.check_lang_locale_views(w)
self.check_quit_button(w)
self.check_continue_button(w)
|
hassanabidpk/django | refs/heads/master | django/db/backends/sqlite3/schema.py | 309 | import codecs
import contextlib
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
def __enter__(self):
with self.connection.cursor() as c:
# Some SQLite schema alterations need foreign key constraints to be
# disabled. This is the default in SQLite but can be changed with a
# build flag and might change in future, so can't be relied upon.
# We enforce it here for the duration of the transaction.
c.execute('PRAGMA foreign_keys')
self._initial_pragma_fk = c.fetchone()[0]
c.execute('PRAGMA foreign_keys = 0')
return super(DatabaseSchemaEditor, self).__enter__()
def __exit__(self, exc_type, exc_value, traceback):
super(DatabaseSchemaEditor, self).__exit__(exc_type, exc_value, traceback)
with self.connection.cursor() as c:
# Restore initial FK setting - PRAGMA values can't be parametrized
c.execute('PRAGMA foreign_keys = %s' % int(self._initial_pragma_fk))
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
import sqlite3
try:
value = sqlite3.adapt(value)
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, (Decimal, float)):
return str(value)
elif isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, six.memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character:
# value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
value = bytes(value)
hex_encoder = codecs.getencoder('hex_codec')
value_hex, _length = hex_encoder(value)
# Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
return "X'%s'" % value_hex.decode('ascii')
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], override_uniques=None,
override_indexes=None):
"""
Shortcut to transform a model from old_model into new_model
The essential steps are:
1. rename the model's existing table, e.g. "app_model" to "app_model__old"
2. create a table with the updated definition called "app_model"
3. copy the data from the old renamed table to the new table
4. delete the "app_model__old" table
"""
# Work out the new fields dict / mapping
body = {f.name: f for f in model._meta.local_concrete_fields}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
for field in create_fields:
body[field.name] = field
# Choose a default and insert it into the copy map
if not field.many_to_many and field.concrete:
mapping[field.column] = self.quote_value(
self.effective_default(field)
)
# Add in any altered fields
for (old_field, new_field) in alter_fields:
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
for field in delete_fields:
del body[field.name]
del mapping[field.column]
# Remove any implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body = copy.deepcopy(body)
# Work out the new value of unique_together, taking renames into
# account
if override_uniques is None:
override_uniques = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
if override_indexes is None:
override_indexes = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': override_uniques,
'index_together': override_indexes,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# We need to modify model._meta.db_table, but everything explodes
# if the change isn't reversed before the end of this method. This
# context manager helps us avoid that situation.
@contextlib.contextmanager
def altered_table_name(model, temporary_table_name):
original_table_name = model._meta.db_table
model._meta.db_table = temporary_table_name
yield
model._meta.db_table = original_table_name
with altered_table_name(model, model._meta.db_table + "__old"):
# Rename the old table to make way for the new
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Create a new table with the updated schema. We remove things
# from the deferred SQL that match our table name, too
self.deferred_sql = [x for x in self.deferred_sql if temp_model._meta.db_table not in x]
self.create_model(temp_model)
# Copy data from the old table into the new table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(y for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model, handle_autom2m=False)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
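    # Illustrative SQL sequence emitted by _remake_table for a hypothetical
    # "app_model" table (a sketch; the exact statements depend on the fields):
    #   ALTER TABLE "app_model" RENAME TO "app_model__old";
    #   CREATE TABLE "app_model" (...new definition...);
    #   INSERT INTO "app_model" (...) SELECT ... FROM "app_model__old";
    #   DROP TABLE "app_model__old";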
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super(DatabaseSchemaEditor, self).delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_fields=[field])
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_indexes=new_index_together)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_fields=[(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)],
override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.remote_field.through)
|
codeaudit/gp-structure-search | refs/heads/master | source/mitparallel/util.py | 7 | import config
import os
import tempfile
def mkstemp_safe(directory, suffix):
(os_file_handle, file_name) = tempfile.mkstemp(dir=directory, suffix=suffix)
os.close(os_file_handle)
return file_name
def create_temp_file(extension):
return mkstemp_safe(config.TEMP_PATH, extension)
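# Example usage (illustrative; TEMP_PATH comes from the local config module):
#   name = create_temp_file('.mat')  # e.g. '/path/to/temp/tmpXXXXXX.mat'
#   ...write to `name` and remove it when finished...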
|
pp-mo/iris-grib | refs/heads/master | iris_grib/tests/unit/save_rules/test_identification.py | 1 | # (C) British Crown Copyright 2016, Met Office
#
# This file is part of iris-grib.
#
# iris-grib is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iris-grib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with iris-grib. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for `iris_grib.grib_save_rules.identification`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris_grib.tests first so that some things can be initialised before
# importing anything else.
import iris_grib.tests as tests
import gribapi
import mock
import iris
import iris.tests.stock as stock
from iris_grib._save_rules import identification
from iris_grib.tests.unit import TestGribSimple
GRIB_API = 'iris_grib._save_rules.gribapi'
class Test(TestGribSimple):
@tests.skip_data
def test_no_realization(self):
cube = stock.simple_pp()
grib = mock.Mock()
mock_gribapi = mock.Mock(spec=gribapi)
with mock.patch(GRIB_API, mock_gribapi):
identification(cube, grib)
mock_gribapi.assert_has_calls(
[mock.call.grib_set_long(grib, "typeOfProcessedData", 2)])
@tests.skip_data
def test_realization_0(self):
cube = stock.simple_pp()
realisation = iris.coords.AuxCoord((0,), standard_name='realization',
units='1')
cube.add_aux_coord(realisation)
grib = mock.Mock()
mock_gribapi = mock.Mock(spec=gribapi)
with mock.patch(GRIB_API, mock_gribapi):
identification(cube, grib)
mock_gribapi.assert_has_calls(
[mock.call.grib_set_long(grib, "typeOfProcessedData", 3)])
@tests.skip_data
def test_realization_n(self):
cube = stock.simple_pp()
realisation = iris.coords.AuxCoord((2,), standard_name='realization',
units='1')
cube.add_aux_coord(realisation)
grib = mock.Mock()
mock_gribapi = mock.Mock(spec=gribapi)
with mock.patch(GRIB_API, mock_gribapi):
identification(cube, grib)
mock_gribapi.assert_has_calls(
[mock.call.grib_set_long(grib, "typeOfProcessedData", 4)])
if __name__ == "__main__":
tests.main()
|
luo66/scikit-learn | refs/heads/master | sklearn/preprocessing/label.py | 137 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be CSR,
            CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
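    # Illustrative trace of _transform (a sketch; assumes class_mapping maps
    # label 1 -> column 0, 2 -> 1, 3 -> 2):
    #   y = [(1, 2), (3,)]
    #   indices -> [0, 1, 2], indptr -> [0, 2, 3]
    #   yielding the indicator matrix [[1, 1, 0],
    #                                  [0, 0, 1]]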
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
|
Luciden/easl | refs/heads/master | easl/visualize/visualizer.py | 1 | __author__ = 'Dennis'
class Visual(object):
@staticmethod
def visualize(self):
"""
Parameters
----------
self : object
Any object that will be visualized.
Returns
-------
visualization : Visualization
"""
raise NotImplementedError("Base Class")
class Visualization(object):
def __init__(self, name, show_name=False):
self.name = name
self.show_name = show_name
class Group(Visualization):
def __init__(self, name):
super(Group, self).__init__(name)
self.elements = []
def add_element(self, element):
if element is not None:
self.elements.append(element)
def get_elements(self):
return self.elements
class Rows(Group):
def __init__(self, name):
super(Rows, self).__init__(name)
class Columns(Group):
def __init__(self, name):
super(Columns, self).__init__(name)
class Slider(Visualization):
"""
A slider with a fixed number of positions.
A horizontal
--|-----
or vertical
|
+
|
slider.
Attributes
----------
name : string
"""
def __init__(self, name, number, position):
super(Slider, self).__init__(name)
self.name = name
self.number = number
if 0 <= position < number:
self.position = position
else:
raise RuntimeError("position not in slide")
class Table(Visualization):
"""
A table.
A B
C 1 0
D 2 4
Attributes
----------
name : string
"""
def __init__(self, name):
super(Table, self).__init__(name)
class Tree(Visualization):
def __init__(self, name, tree):
"""
Attributes
----------
tree : {name: {name: ...{name: value}}}
"""
super(Tree, self).__init__(name)
self.tree = tree
class Number(Visualization):
def __init__(self, name, number):
super(Number, self).__init__(name)
self.number = number
class Grid(Visualization):
def __init__(self, name, w, h):
super(Grid, self).__init__(name)
self.grid = [[None for _ in range(w)] for _ in range(h)]
self.w = w
self.h = h
    def add_element(self, element, y, x):
        # the outer list holds rows, so index by row (y) first, then column (x)
        if 0 <= x < self.w and 0 <= y < self.h:
            self.grid[y][x] = element
    def element_at(self, x, y):
        return self.grid[y][x]
class List(Visualization):
def __init__(self, name, elements):
super(List, self).__init__(name)
self.elements = elements
class Dict(Visualization):
def __init__(self, name, elements):
super(Dict, self).__init__(name)
self.elements = elements
class Circle(Visualization):
def __init__(self, name, v_min, v_max, v):
super(Circle, self).__init__(name)
self.v_min = v_min
self.v_max = v_max
self.v = v
class Graph(Visualization):
def __init__(self, name, graph, nodes, edges, groups=None):
super(Graph, self).__init__(name)
self.graph = graph
self.nodes = nodes
self.edges = edges
self.groups = groups
class Visualizer(object):
def __init__(self):
self.visualizations = None
def reset_visualization(self):
self.visualizations = Rows("main")
def update_visualization(self, v):
if v is None:
return
else:
self.visualizations.add_element(v)
def update(self, iteration):
"""Draws all the current visualizations to the screen.
"""
raise NotImplementedError("Blah")
|
GunoH/intellij-community | refs/heads/master | python/testData/quickFixes/PyAddImportQuickFixTest/orderingPathComponentsNumber/b/c/__init__.py | 119 | foo = 2 |
zturchan/CMPUT410-Lab6 | refs/heads/master | v1/lib/python2.7/site-packages/django/conf/locale/el/__init__.py | 12133432 | |
SRabbelier/Melange | refs/heads/master | thirdparty/google_appengine/lib/django_1_2/tests/modeltests/proxy_model_inheritance/app1/__init__.py | 12133432 | |
paboldin/rally | refs/heads/master | rally/plugins/openstack/scenarios/designate/__init__.py | 12133432 | |
beck/django | refs/heads/master | tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/__init__.py | 12133432 | |
Qalthos/ansible | refs/heads/devel | lib/ansible/modules/web_infrastructure/__init__.py | 12133432 | |
Zlash65/erpnext | refs/heads/develop | erpnext/patches/v6_21/rename_material_request_fields.py | 61 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
frappe.reload_doc('stock', 'doctype', 'material_request_item')
rename_field("Material Request Item", "sales_order_no", "sales_order")
frappe.reload_doc('support', 'doctype', 'maintenance_schedule_item')
rename_field("Maintenance Schedule Item", "prevdoc_docname", "sales_order")
|
Kiiv/CouchPotatoServer | refs/heads/develop | libs/requests/packages/chardet/chardetect.py | 743 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
if __name__ == '__main__':
main()
|
zhuwenping/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/importlib/test/source/__init__.py | 52 | import importlib.test
import os.path
import unittest
def test_suite():
directory = os.path.dirname(__file__)
return importlib.test.test_suite('importlib.test.source', directory)
if __name__ == '__main__':
from test.support import run_unittest
run_unittest(test_suite())
|
40423213/2016fallcadp_hw | refs/heads/gh-pages | plugin/liquid_tags/notebook.py | 235 | """
Notebook Tag
------------
This is a liquid-style tag to include a static html rendering of an IPython
notebook in a blog post.
Syntax
------
{% notebook filename.ipynb [ cells[start:end] ]%}
The file should be specified relative to the ``notebooks`` subdirectory of the
content directory. Optionally, this subdirectory can be specified in the
config file:
NOTEBOOK_DIR = 'notebooks'
The cells[start:end] statement is optional, and can be used to specify which
block of cells from the notebook to include.
Requirements
------------
- The plugin requires IPython version 1.0 or above. It no longer supports the
standalone nbconvert package, which has been deprecated.
Details
-------
Because the notebook relies on some rather extensive custom CSS, the use of
this plugin requires additional CSS to be inserted into the blog theme.
After typing "make html" when using the notebook tag, a file called
``_nb_header.html`` will be produced in the main directory. The content
of the file should be included in the header of the theme. An easy way
to accomplish this is to add the following lines within the header template
of the theme you use:
{% if EXTRA_HEADER %}
{{ EXTRA_HEADER }}
{% endif %}
and in your ``pelicanconf.py`` file, include the line:
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
this will insert the appropriate CSS. All efforts have been made to ensure
that this CSS will not override formats within the blog theme, but there may
still be some conflicts.
"""
import re
import os
from functools import partial
from .mdx_liquid_tags import LiquidTags
import IPython
IPYTHON_VERSION = IPython.version_info[0]
try:
import nbformat
except:
pass
if not IPYTHON_VERSION >= 1:
raise ValueError("IPython version 1.0+ required for notebook tag")
try:
from nbconvert.filters.highlight import _pygments_highlight
except ImportError:
try:
from IPython.nbconvert.filters.highlight import _pygments_highlight
except ImportError:
# IPython < 2.0
from IPython.nbconvert.filters.highlight import _pygment_highlight as _pygments_highlight
from pygments.formatters import HtmlFormatter
try:
from nbconvert.exporters import HTMLExporter
except ImportError:
from IPython.nbconvert.exporters import HTMLExporter
try:
from traitlets.config import Config
except ImportError:
from IPython.config import Config
try:
from nbconvert.preprocessors import Preprocessor
except ImportError:
try:
from IPython.nbconvert.preprocessors import Preprocessor
except ImportError:
# IPython < 2.0
from IPython.nbconvert.transformers import Transformer as Preprocessor
try:
from traitlets import Integer
except ImportError:
from IPython.utils.traitlets import Integer
from copy import deepcopy
#----------------------------------------------------------------------
# Some code that will be added to the header:
# Some of the following javascript/css include is adapted from
# IPython/nbconvert/templates/fullhtml.tpl, while some are custom tags
# specifically designed to make the results look good within the
# pelican-octopress theme.
JS_INCLUDE = r"""
<style type="text/css">
/* Overrides of notebook CSS for static HTML export */
div.entry-content {
overflow: visible;
padding: 8px;
}
.input_area {
padding: 0.2em;
}
a.heading-anchor {
white-space: normal;
}
.rendered_html
code {
font-size: .8em;
}
pre.ipynb {
color: black;
background: #f7f7f7;
border: none;
box-shadow: none;
margin-bottom: 0;
padding: 0;
margin: 0px;
font-size: 13px;
}
/* remove the prompt div from text cells */
div.text_cell .prompt {
display: none;
}
/* remove horizontal padding from text cells, */
/* so it aligns with outer body text */
div.text_cell_render {
padding: 0.5em 0em;
}
img.anim_icon{padding:0; border:0; vertical-align:middle; -webkit-box-shadow:none; -box-shadow:none}
div.collapseheader {
    width: 100%;
background-color:#d3d3d3;
padding: 2px;
cursor: pointer;
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
}
</style>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML" type="text/javascript"></script>
<script type="text/javascript">
init_mathjax = function() {
if (window.MathJax) {
// MathJax loaded
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ]
},
displayAlign: 'left', // Change this to 'center' to center equations.
"HTML-CSS": {
styles: {'.MathJax_Display': {"margin": 0}}
}
});
MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
}
}
init_mathjax();
</script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript">
jQuery(document).ready(function($) {
$("div.collapseheader").click(function () {
$header = $(this).children("span").first();
$codearea = $(this).children(".input_area");
console.log($(this).children());
$codearea.slideToggle(500, function () {
$header.text(function () {
return $codearea.is(":visible") ? "Collapse Code" : "Expand Code";
});
});
});
});
</script>
"""
CSS_WRAPPER = """
<style type="text/css">
{0}
</style>
"""
#----------------------------------------------------------------------
# Create a custom preprocessor
class SliceIndex(Integer):
"""An integer trait that accepts None"""
default_value = None
def validate(self, obj, value):
if value is None:
return value
else:
return super(SliceIndex, self).validate(obj, value)
class SubCell(Preprocessor):
"""A transformer to select a slice of the cells of a notebook"""
start = SliceIndex(0, config=True,
help="first cell of notebook to be converted")
end = SliceIndex(None, config=True,
help="last cell of notebook to be converted")
def preprocess(self, nb, resources):
nbc = deepcopy(nb)
if IPYTHON_VERSION < 3:
for worksheet in nbc.worksheets:
cells = worksheet.cells[:]
worksheet.cells = cells[self.start:self.end]
else:
nbc.cells = nbc.cells[self.start:self.end]
return nbc, resources
call = preprocess # IPython < 2.0
#----------------------------------------------------------------------
# Custom highlighter:
# instead of using class='highlight', use class='highlight-ipynb'
def custom_highlighter(source, language='ipython', metadata=None):
formatter = HtmlFormatter(cssclass='highlight-ipynb')
if not language:
language = 'ipython'
output = _pygments_highlight(source, formatter, language)
return output.replace('<pre>', '<pre class="ipynb">')
#----------------------------------------------------------------------
# Below is the pelican plugin code.
#
SYNTAX = "{% notebook /path/to/notebook.ipynb [ cells[start:end] ] [ language[language] ] %}"
FORMAT = re.compile(r"""^(\s+)?(?P<src>\S+)(\s+)?((cells\[)(?P<start>-?[0-9]*):(?P<end>-?[0-9]*)(\]))?(\s+)?((language\[)(?P<language>-?[a-z0-9\+\-]*)(\]))?(\s+)?$""")
@LiquidTags.register('notebook')
def notebook(preprocessor, tag, markup):
match = FORMAT.search(markup)
if match:
argdict = match.groupdict()
src = argdict['src']
start = argdict['start']
end = argdict['end']
language = argdict['language']
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
if start:
start = int(start)
else:
start = 0
if end:
end = int(end)
else:
end = None
language_applied_highlighter = partial(custom_highlighter, language=language)
nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
nb_path = os.path.join('content', nb_dir, src)
if not os.path.exists(nb_path):
raise ValueError("File {0} could not be found".format(nb_path))
# Create the custom notebook converter
c = Config({'CSSHTMLHeaderTransformer':
{'enabled':True, 'highlight_class':'.highlight-ipynb'},
'SubCell':
{'enabled':True, 'start':start, 'end':end}})
template_file = 'basic'
if IPYTHON_VERSION >= 3:
if os.path.exists('pelicanhtml_3.tpl'):
template_file = 'pelicanhtml_3'
elif IPYTHON_VERSION == 2:
if os.path.exists('pelicanhtml_2.tpl'):
template_file = 'pelicanhtml_2'
else:
if os.path.exists('pelicanhtml_1.tpl'):
template_file = 'pelicanhtml_1'
if IPYTHON_VERSION >= 2:
subcell_kwarg = dict(preprocessors=[SubCell])
else:
subcell_kwarg = dict(transformers=[SubCell])
exporter = HTMLExporter(config=c,
template_file=template_file,
filters={'highlight2html': language_applied_highlighter},
**subcell_kwarg)
# read and parse the notebook
with open(nb_path, encoding="utf-8") as f:
nb_text = f.read()
if IPYTHON_VERSION < 3:
nb_json = IPython.nbformat.current.reads_json(nb_text)
else:
try:
nb_json = nbformat.reads(nb_text, as_version=4)
except Exception:  # fall back to the reader bundled with IPython
nb_json = IPython.nbformat.reads(nb_text, as_version=4)
(body, resources) = exporter.from_notebook_node(nb_json)
# if we haven't already saved the header, save it here.
if not notebook.header_saved:
print ("\n ** Writing styles to _nb_header.html: "
"this should be included in the theme. **\n")
header = '\n'.join(CSS_WRAPPER.format(css_line)
for css_line in resources['inlining']['css'])
header += JS_INCLUDE
with open('_nb_header.html', 'w', encoding="utf-8") as f:
f.write(header)
notebook.header_saved = True
# this will stash special characters so that they won't be transformed
# by subsequent processes.
body = preprocessor.configs.htmlStash.store(body, safe=True)
return body
notebook.header_saved = False
#----------------------------------------------------------------------
# This import allows notebook to be a Pelican plugin
from liquid_tags import register
|
romain-dartigues/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_webfilter_override.py | 7 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can enable it by setting the
# following in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_override
short_description: Configure FortiGuard Web Filter administrative overrides.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure webfilter feature and override category.
Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_override:
description:
- Configure FortiGuard Web Filter administrative overrides.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
expires:
description:
- "Override expiration date and time, from 5 minutes to 365 from now (format: yyyy/mm/dd hh:mm:ss)."
id:
description:
- Override rule ID.
required: true
initiator:
description:
- Initiating user of override (read-only setting).
ip:
description:
- IPv4 address to which the override applies.
ip6:
description:
- IPv6 address to which the override applies.
new-profile:
description:
- Name of the new web filter profile used by the override. Source webfilter.profile.name.
old-profile:
description:
- Name of the web filter profile to which the override applies. Source webfilter.profile.name.
scope:
description:
- Override either the specific user, user group, IPv4 address, or IPv6 address.
choices:
- user
- user-group
- ip
- ip6
status:
description:
- Enable/disable override rule.
choices:
- enable
- disable
user:
description:
- Name of the user to whom the override applies.
user-group:
description:
- Specify the user group for which the override applies. Source user.group.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure FortiGuard Web Filter administrative overrides.
fortios_webfilter_override:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_override:
state: "present"
expires: "<your_own_value>"
id: "4"
initiator: "<your_own_value>"
ip: "<your_own_value>"
ip6: "<your_own_value>"
new-profile: "<your_own_value> (source webfilter.profile.name)"
old-profile: "<your_own_value> (source webfilter.profile.name)"
scope: "user"
status: "enable"
user: "<your_own_value>"
user-group: "<your_own_value> (source user.group.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_webfilter_override_data(json):
option_list = ['expires', 'id', 'initiator',
'ip', 'ip6', 'new-profile',
'old-profile', 'scope', 'status',
'user', 'user-group']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def webfilter_override(data, fos):
vdom = data['vdom']
webfilter_override_data = data['webfilter_override']
filtered_data = filter_webfilter_override_data(webfilter_override_data)
if webfilter_override_data['state'] == "present":
return fos.set('webfilter',
'override',
data=filtered_data,
vdom=vdom)
elif webfilter_override_data['state'] == "absent":
return fos.delete('webfilter',
'override',
mkey=filtered_data['id'],
vdom=vdom)
def fortios_webfilter(data, fos):
login(data)
methodlist = ['webfilter_override']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
success = resp['status'] == "success"
return not success, success, resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"webfilter_override": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"expires": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"initiator": {"required": False, "type": "str"},
"ip": {"required": False, "type": "str"},
"ip6": {"required": False, "type": "str"},
"new-profile": {"required": False, "type": "str"},
"old-profile": {"required": False, "type": "str"},
"scope": {"required": False, "type": "str",
"choices": ["user", "user-group", "ip",
"ip6"]},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"user": {"required": False, "type": "str"},
"user-group": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_webfilter(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
silly-wacky-3-town-toon/SOURCE-COD | refs/heads/master | dubrari/tools/compiler/dna/components/DNALandmarkBuilding.py | 2 | from DNANode import DNANode
from dna.base.DNAPacker import *
class DNALandmarkBuilding(DNANode):
COMPONENT_CODE = 13
def __init__(self, name):
DNANode.__init__(self, name)
self.code = ''
self.wallColor = (1, 1, 1, 1)
def setCode(self, code):
self.code = code
def setWallColor(self, color):
self.wallColor = color
def traverse(self, recursive=True, verbose=False):
packer = DNANode.traverse(self, recursive=False, verbose=verbose)
packer.name = 'DNALandmarkBuilding' # Override the name for debugging.
packer.pack('code', self.code, STRING)
packer.packColor('wall color', *self.wallColor)
if recursive:
packer += self.traverseChildren(verbose=verbose)
return packer
|
jagguli/intellij-community | refs/heads/master | python/testData/intentions/afterParamSectionBeforeKeywords.py | 53 | def f(x):
"""
Args:
x:
Keyword arguments:
Returns:
None
"""
|
fpy171/django | refs/heads/master | tests/queryset_pickle/__init__.py | 12133432 | |
cruzegoodin/TSC-ShippingDetails | refs/heads/master | flask/lib/python2.7/site-packages/pip/_vendor/requests/compat.py | 1038 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
|
pwil3058/darning | refs/heads/master | darning/cli/__init__.py | 1 | ### Copyright (C) 2010 Peter Williams <peter_ono@users.sourceforge.net>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
Library functions that are ony of interest CLI programs
"""
# This should be the only place that subcmd_* modules should be imported
# as this is sufficient to activate them.
from . import subcmd_init
from . import subcmd_new
from . import subcmd_push
from . import subcmd_pop
from . import subcmd_add
from . import subcmd_refresh
from . import subcmd_import
from . import subcmd_drop
from . import subcmd_remove
from . import subcmd_files
from . import subcmd_series
from . import subcmd_export
from . import subcmd_diff
from . import subcmd_copy
from . import subcmd_move
from . import subcmd_fold
from . import subcmd_absorb
from . import subcmd_rename
from . import subcmd_validate
from . import subcmd_duplicate
from . import subcmd_delete
from . import subcmd_select
from . import subcmd_guard
from . import subcmd_kept
|
asydorchuk/ml | refs/heads/master | classes/cs231n/assignment2/cs231n/layers.py | 1 | import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
#############################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
#############################################################################
out = x.reshape(x.shape[0], -1).dot(w) + b
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, w, b)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
#############################################################################
# TODO: Implement the affine backward pass. #
#############################################################################
dx = dout.dot(w.T).reshape(x.shape)
dw = x.reshape(x.shape[0], -1).T.dot(dout).reshape(w.shape)
db = np.ones((1, dout.shape[0])).dot(dout).reshape(b.shape)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dw, db
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
#############################################################################
# TODO: Implement the ReLU forward pass. #
#############################################################################
out = np.maximum(0, x)
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
#############################################################################
# TODO: Implement the ReLU backward pass. #
#############################################################################
dx = np.zeros(x.shape)
dx[cache > 0.0] = 1.0
dx *= dout
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx
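# The helper pairs below decompose batchnorm's normalization into a
# mean-centering step and a variance-scaling step, composing to
# x -> (x - mean(x)) / sqrt(var(x) + eps).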
def mean_forward(x):
out, cache = None, None
out = x - x.mean(axis=0)
return out, cache
def mean_backward(dout, cache):
dx = dout - dout.mean(axis=0)
return dx
def var_forward(x, eps):
out, cache = None, None
sdev = np.sqrt(x.var(axis=0) + eps)
cache = x, eps, sdev
out = x / sdev
return out, cache
def var_backward(dout, cache):
x, eps, sdev = cache
dx = dout / sdev - x / np.power(sdev, 3) * ((x * dout).sum(axis=0)) / x.shape[0]
return dx
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the mean
and variance of each feature, and these averages are used to normalize data
at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7 implementation
of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift paremeter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, {}
if mode == 'train':
#############################################################################
# TODO: Implement the training-time forward pass for batch normalization. #
# Use minibatch statistics to compute the mean and variance, use these #
# statistics to normalize the incoming data, and scale and shift the #
# normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates that #
# you need for the backward pass should be stored in the cache variable. #
# #
# You should also use your computed sample mean and variance together with #
# the momentum variable to update the running mean and running variance, #
# storing your result in the running_mean and running_var variables. #
#############################################################################
sample_mean = x.mean(axis=0)
xmean = x - sample_mean
sample_var = xmean.var(axis=0)
xvar = xmean / np.sqrt(sample_var + eps)
xnorm = xvar
cache = x, xmean, xvar, gamma, beta, sample_mean, sample_var, eps
out = xnorm * gamma + beta
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
#############################################################################
# END OF YOUR CODE #
#############################################################################
elif mode == 'test':
#############################################################################
# TODO: Implement the test-time forward pass for batch normalization. Use #
# the running mean and variance to normalize the incoming data, then scale #
# and shift the normalized data using gamma and beta. Store the result in #
# the out variable. #
#############################################################################
out = (x - running_mean) / np.sqrt(running_var + eps)
out = out * gamma + beta
#############################################################################
# END OF YOUR CODE #
#############################################################################
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
#############################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
#############################################################################
x, xmean, xvar, gamma, beta, sample_mean, sample_var, eps = cache
xnorm = xvar
dbeta = dout.sum(axis=0)
dgamma = (dout * xnorm).sum(axis=0)
dxvar = dout * gamma
denom = 1.0 / np.sqrt(sample_var + eps)
dxmean = dxvar * denom - xmean * np.power(denom, 3) * ((xmean * dxvar).sum(axis=0)) / x.shape[0]
dx = dxmean - dxmean.mean(axis=0)
# dxnorm = dout * gamma
# dsample_var = np.sum(-0.5 * dxnorm * xnorm / (sample_var + eps), axis =0)
# dsample_mean = np.sum(-1 / np.sqrt(sample_var+eps)* dxnorm, axis = 0)
# n = x.shape[0]
# dx = 1 / np.sqrt(sample_var + eps) * dxnorm + dsample_var * 2.0 / n * (x - sample_mean) + 1.0 / n * dsample_mean
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
#############################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
# #
# After computing the gradient with respect to the centered inputs, you #
# should be able to compute gradients with respect to the inputs in a #
# single statement; our implementation fits on a single 80-character line. #
#############################################################################
x, xmean, xvar, gamma, beta, sample_mean, sample_var, eps = cache
xnorm = xvar
dbeta = dout.sum(axis=0)
dgamma = (dout * xnorm).sum(axis=0)
dxvar = dout * gamma
denom = 1.0 / np.sqrt(sample_var + eps)
dxmean = dxvar * denom - xmean * np.power(denom, 3) * ((xmean * dxvar).sum(axis=0)) / x.shape[0]
dx = dxmean - dxmean.mean(axis=0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not in
real networks.
Outputs:
- out: Array of the same shape as x.
- cache: A tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
###########################################################################
mask = np.random.rand(*x.shape)
mask[mask <= p] = 0.0
mask[mask > p] = 1.0 / (1.0 - p)
out = x * mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
###########################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
###########################################################################
out = x
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase backward pass for inverted dropout. #
###########################################################################
dx = dout * mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
dx = dout
return dx
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and width
W. We convolve each input with F different filters, where each filter spans
all C channels and has height HH and width HH.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
stride = conv_param['stride']
pad = conv_param['pad']
N, C, H, W = x.shape
F, C, HH, WW = w.shape
HN = 1 + (H + 2 * pad - HH) / stride
WN = 1 + (W + 2 * pad - WW) / stride
out = np.zeros((N, F, HN, WN))
cache = (x, w, b, conv_param)
#############################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
#############################################################################
for n in xrange(N):
xn = np.pad(x[n], ((0,0), (pad, pad), (pad, pad)), 'constant', constant_values=0)
for f in xrange(F):
wf = w[f, :, :, :]
bf = b[f]
for hn in xrange(HN):
for wn in xrange(WN):
hc = hn * stride
wc = wn * stride
out[n, f, hn, wn] = (xn[:, hc:hc+HH, wc:wc+WW] * wf).sum() + bf
#############################################################################
# END OF YOUR CODE #
#############################################################################
return out, cache
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
x, w, b, conv_param = cache
stride = conv_param['stride']
pad = conv_param['pad']
N, C, H, W = x.shape
F, C, HH, WW = w.shape
HN = 1 + (H + 2 * pad - HH) / stride
WN = 1 + (W + 2 * pad - WW) / stride
dx = np.zeros(x.shape)
dw = np.zeros(w.shape)
db = np.zeros(b.shape)
#############################################################################
# TODO: Implement the convolutional backward pass. #
#############################################################################
for n in xrange(N):
xn = np.pad(x[n], ((0,0), (pad, pad), (pad, pad)), 'constant', constant_values=0)
dxn = np.zeros(xn.shape)
for f in xrange(F):
for hn in xrange(HN):
for wn in xrange(WN):
hc = hn * stride
wc = wn * stride
dxn[:, hc:hc+HH, wc:wc+WW] += w[f, :, :, :] * dout[n, f, hn, wn]
dw[f] += xn[:, hc:hc+HH, wc:wc+WW] * dout[n, f, hn, wn]
db[f] += dout[n, f, hn, wn]
dx[n] += dxn[:, pad:pad+H, pad:pad+W]  # strip the padding; also safe when pad == 0
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dw, db
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
ph = pool_param['pool_height']
pw = pool_param['pool_width']
stride = pool_param['stride']
N, C, H, W = x.shape
nh = (H - ph) / stride + 1
nw = (W - pw) / stride + 1
out = np.zeros((N, C, nh, nw))
#############################################################################
# TODO: Implement the max pooling forward pass #
#############################################################################
for n in xrange(N):
for c in xrange(C):
for h in xrange(nh):
for w in xrange(nw):
sh = h * stride
sw = w * stride
out[n, c, h, w] = np.max(x[n, c, sh:sh+ph, sw:sw+pw])  # pool over the full ph x pw window
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
x, pool_param = cache
ph = pool_param['pool_height']
pw = pool_param['pool_width']
stride = pool_param['stride']
N, C, H, W = x.shape
nh = (H - ph) / stride + 1
nw = (W - pw) / stride + 1
dx = np.zeros(x.shape)
#############################################################################
# TODO: Implement the max pooling backward pass #
#############################################################################
for n in xrange(N):
for c in xrange(C):
for h in xrange(nh):
for w in xrange(nw):
sh = h * stride
sw = w * stride
region = x[n, c, sh:sh+ph, sw:sw+pw]
midx = np.unravel_index(region.argmax(), region.shape)
# Route the upstream gradient to the argmax position within the window.
dx[n, c, sh + midx[0], sw + midx[1]] += dout[n, c, h, w]
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
N, C, H, W = x.shape
tmp, cache = batchnorm_forward(
x.transpose(0, 2, 3, 1).reshape((-1, C)), gamma, beta, bn_param)
out = tmp.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
N, C, H, W = dout.shape
dtmp, dgamma, dbeta = batchnorm_backward_alt(
dout.transpose(0, 2, 3, 1).reshape(-1, C), cache)
dx = dtmp.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return dx, dgamma, dbeta
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
probs = np.maximum(np.exp(x - np.max(x, axis=1, keepdims=True)), 1e-10)
probs /= np.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -np.sum(np.log(probs[np.arange(N), y])) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
|
controlzee/ansible | refs/heads/devel | test/integration/cleanup_gce.py | 163 | '''
Find and delete GCE resources matching the provided --match string. Unless
--yes|-y is provided, prompt for confirmation prior to deleting resources.
Please use caution, you can easily delete your *ENTIRE* GCE infrastructure.
'''
import os
import re
import sys
import optparse
import yaml
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
import gce_credentials
def delete_gce_resources(get_func, attr, opts):
for item in get_func():
val = getattr(item, attr)
if re.search(opts.match_re, val, re.IGNORECASE):
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
def prompt_and_delete(item, prompt, assumeyes):
if not assumeyes:
assumeyes = raw_input(prompt).lower() == 'y'
assert hasattr(item, 'destroy'), "Class <%s> has no destroy attribute" % item.__class__
if assumeyes:
item.destroy()
print ("Deleted %s" % item)
def parse_args():
parser = optparse.OptionParser(usage="%s [options]" % (sys.argv[0],),
description=__doc__)
gce_credentials.add_credentials_options(parser)
parser.add_option("--yes", "-y",
action="store_true", dest="assumeyes",
default=False,
help="Don't prompt for confirmation")
parser.add_option("--match",
action="store", dest="match_re",
default="^ansible-testing-",
help="Regular expression used to find GCE resources (default: %default)")
(opts, args) = parser.parse_args()
gce_credentials.check_required(opts, parser)
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
# Connect to GCE
gce = gce_credentials.get_gce_driver(opts)
try:
# Delete matching instances
delete_gce_resources(gce.list_nodes, 'name', opts)
# Delete matching snapshots
def get_snapshots():
for volume in gce.list_volumes():
for snapshot in gce.list_volume_snapshots(volume):
yield snapshot
delete_gce_resources(get_snapshots, 'name', opts)
# Delete matching disks
delete_gce_resources(gce.list_volumes, 'name', opts)
except KeyboardInterrupt as e:
print("\nExiting on user command.")
|
glewis17/fuel | refs/heads/master | fuel/converters/base.py | 13 | import os
import sys
from contextlib import contextmanager
from functools import wraps
import numpy
from progressbar import (ProgressBar, Percentage, Bar, ETA)
from fuel.datasets import H5PYDataset
from ..exceptions import MissingInputFiles
def check_exists(required_files):
"""Decorator that checks if required files exist before running.
Parameters
----------
required_files : list of str
A list of strings indicating the filenames of regular files
(not directories) that should be found in the input directory
(which is the first argument to the wrapped function).
Returns
-------
wrapper : function
A function that takes a function and returns a wrapped function.
The function returned by `wrapper` will include input file
existence verification.
Notes
-----
Assumes that the directory in which to find the input files is
provided as the first argument, with the argument name `directory`.
"""
def function_wrapper(f):
@wraps(f)
def wrapped(directory, *args, **kwargs):
missing = []
for filename in required_files:
if not os.path.isfile(os.path.join(directory, filename)):
missing.append(filename)
if len(missing) > 0:
raise MissingInputFiles('Required files missing', missing)
return f(directory, *args, **kwargs)
return wrapped
return function_wrapper
def fill_hdf5_file(h5file, data):
"""Fills an HDF5 file in a H5PYDataset-compatible manner.
Parameters
----------
h5file : :class:`h5py.File`
File handle for an HDF5 file.
data : tuple of tuple
One element per split/source pair. Each element consists of a
tuple of (split_name, source_name, data_array, comment), where
* 'split_name' is a string identifier for the split name
* 'source_name' is a string identifier for the source name
* 'data_array' is a :class:`numpy.ndarray` containing the data
for this split/source pair
* 'comment' is a comment string for the split/source pair
The 'comment' element can optionally be omitted.
"""
# Check that all sources for a split have the same length
split_names = set(split_tuple[0] for split_tuple in data)
for name in split_names:
lengths = [len(split_tuple[2]) for split_tuple in data
if split_tuple[0] == name]
if not all(l == lengths[0] for l in lengths):
raise ValueError("split '{}' has sources that ".format(name) +
"vary in length")
# Initialize split dictionary
split_dict = dict([(split_name, {}) for split_name in split_names])
# Compute total source lengths and check that splits have the same dtype
# across a source
source_names = set(split_tuple[1] for split_tuple in data)
for name in source_names:
splits = [s for s in data if s[1] == name]
indices = numpy.cumsum([0] + [len(s[2]) for s in splits])
if not all(s[2].dtype == splits[0][2].dtype for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in dtype")
if not all(s[2].shape[1:] == splits[0][2].shape[1:] for s in splits):
raise ValueError("source '{}' has splits that ".format(name) +
"vary in shapes")
dataset = h5file.create_dataset(
name, (sum(len(s[2]) for s in splits),) + splits[0][2].shape[1:],
dtype=splits[0][2].dtype)
dataset[...] = numpy.concatenate([s[2] for s in splits], axis=0)
for i, j, s in zip(indices[:-1], indices[1:], splits):
if len(s) == 4:
split_dict[s[0]][name] = (i, j, None, s[3])
else:
split_dict[s[0]][name] = (i, j)
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
@contextmanager
def progress_bar(name, maxval, prefix='Converting'):
"""Manages a progress bar for a conversion.
Parameters
----------
name : str
Name of the file being converted.
maxval : int
Total number of steps for the conversion.
"""
widgets = ['{} {}: '.format(prefix, name), Percentage(), ' ',
Bar(marker='=', left='[', right=']'), ' ', ETA()]
bar = ProgressBar(widgets=widgets, maxval=maxval, fd=sys.stdout).start()
try:
yield bar
finally:
bar.update(maxval)
bar.finish()
|
eugeneponomarenko/qualitybots | refs/heads/master | src/webdriver/webdriver_wrapper.py | 26 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap webdriver to perform actions with the browser."""
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.common import desired_capabilities
import chrome_resize
import client_logging
# Test timeout in seconds.
DEFAULT_TEST_TIMEOUT = 600
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 512
LOGGER_NAME = 'webdriver_wrapper'
# Initialize the logger for this module
logger = client_logging.GetLogger(LOGGER_NAME)
class SpawnError(Exception):
pass
class ExecutionError(Exception):
pass
class TimeoutError(Exception):
pass
class ChromeWithProfile(webdriver.Chrome):
"""
"""
def __init__(self, executable_path="chromedriver", port=0):
# Use the caller-supplied driver path and port rather than hardcoded values.
self.service = webdriver.chrome.service.Service(executable_path, port=port)
self.service.start()
caps = desired_capabilities.DesiredCapabilities.CHROME
caps.update({'chrome.switches': ['--user-data-dir=%s' %
chrome_resize.GetChromeProfilePath()]})
webdriver.remote.webdriver.WebDriver.__init__(
self, command_executor=self.service.service_url,
desired_capabilities=caps)
class WebdriverWrapper(object):
"""A wrapper around webdriver used to control a browser.
Attributes:
_driver: The webdriver object used to control the browser.
"""
def __init__(self):
self._driver = None
def __del__(self):
self.KillDriver()
def AddCookies(self, cookies):
"""Add the given list of cookies to the browser.
Args:
cookies: A list of dictionaries describing the cookies to add. The
dictionaries should have the following attributes describing
the cookie: domain, secure, value, expiry, path, http_only, and name.
"""
if self._driver:
for cookie in cookies:
self._driver.add_cookie(cookie)
def DeleteAllCookies(self):
"""Delete all the cookies for the current domain that the browser is on."""
if self._driver:
self._driver.delete_all_cookies()
def ExecuteScript(self, script, timeout=DEFAULT_TEST_TIMEOUT):
"""Execute a given javascript script in the current browser.
Args:
script: A string representing the javascript to execute.
timeout: An optional integer specifying a timeout in seconds to set
for the script execution.
Returns:
The output from the script is returned. If the driver instance doesn't
exist, None is returned.
Raises:
ExecutionError: The javascript failed to execute properly.
"""
if not self._driver:
return None
try:
self._driver.set_script_timeout(timeout)
return self._driver.execute_script(script)
except exceptions.TimeoutException:
logger.exception('Timeout executing the script.')
raise TimeoutError('Timeout executing the script.')
except exceptions.WebDriverException:
logger.exception('Error executing the script.')
raise ExecutionError('Error executing the script.')
def GetScreenshot(self):
"""Takes a screenshot of the current page and returns it as a base64 string.
Returns:
A base64-encoded string representing a PNG image of the current page.
If there is no current driver, None is returned.
"""
if not self._driver:
return None
return self._driver.get_screenshot_as_base64()
def GetUserAgent(self):
"""Get the useragent from the current webdriver browser.
Returns:
A string that represents the useragent for the current webdriver browser.
"""
if not self._driver:
return None
try:
useragent = self.ExecuteScript('return window.navigator.userAgent;')
if useragent:
useragent.encode('ascii')
logger.info('Using browser with useragent "%s"', useragent)
return useragent
except ExecutionError:
logger.exception('There was an error trying to get the useragent.')
return None
def IsRunning(self):
"""Return a boolean indicating whether a webdriver instance is running."""
return self._driver is not None
def KillDriver(self):
"""If a webdriver instance exists, kill the instance."""
if self._driver:
self._driver.quit()
self._driver = None
def NavigateToSite(self, url):
"""Navigate the browser to the given site.
Args:
url: A string representing the URL of the site to navigate to.
Raises:
ExecutionError: The webdriver failed to navigate to the specified url.
"""
if not self._driver:
return
try:
self._driver.get(url)
except exceptions.WebDriverException:
logger.exception('Failed to navigate to the specified url: %s', url)
raise ExecutionError(
'Failed to navigate to the specified url: %s' % url)
def RefreshPage(self):
"""Refresh the current browser page."""
if self._driver:
self._driver.refresh()
def ResizeBrowser(self, width, height):
"""Attempt to resize the browser to the given width and height.
Args:
width: An int representing the requested width for the browser in pixels.
height: An int representing the requested height for the browser in
pixels.
"""
if not self._driver:
return None
try:
self.ExecuteScript('window.resizeTo(%(width)d, %(height)d);' %
{'width': width, 'height': height})
except ExecutionError:
logger.exception('There was an error trying to get resize the browser.')
# TODO(user): Add more browsers after they have been tested (IE, Android).
# TODO(user): Set the width and height based on each test's specifications.
def SpawnChromeDriver(self, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
"""Spawns a Chrome webdriver instance.
Raises:
SpawnError: There is an error spawning the instance.
"""
# Resize the browser through the profile
chrome_resize.SetChromeWindowSize(width, height)
self._SpawnWebDriver(ChromeWithProfile)
def SpawnFirefoxDriver(self):
"""Spawns a Firefox webdriver instance.
Raises:
SpawnError: There is an error spawning the instance.
"""
self._SpawnWebDriver(webdriver.Firefox)
def _SpawnWebDriver(self, driver_function):
"""Spawns a given webdriver instance.
Args:
driver_function: A function that can be called to spawn the requested
webdriver instance.
Raises:
SpawnError: There is an error spawning the instance.
"""
# Make sure we only have one webdriver instance running.
if self._driver:
self.KillDriver()
try:
self._driver = driver_function()
except exceptions.WebDriverException:
logger.exception('Failed to spawn a webdriver instance.')
raise SpawnError('Failed to spawn a webdriver instance.')
|
Curso-OpenShift/Formulario | refs/heads/master | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/pip/vcs/__init__.py | 170 | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.exceptions import BadCommand
from pip.utils import (display_path, backup_dir, call_subprocess,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
logger = logging.getLogger(__name__)
class VcsSupport(object):
_registry = {}
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
return list(self._registry.values())
@property
def dirnames(self):
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
schemes = []
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, cls=None, name=None):
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_name(self, location):
"""
Return the name of the version control backend if found at given
location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
logger.debug('Determine that %s uses VCS: %s',
location, vc_type.name)
return vc_type.name
return None
def get_backend(self, name):
name = name.lower()
if name in self._registry:
return self._registry[name]
def get_backend_from_location(self, location):
vc_type = self.get_backend_name(location)
if vc_type:
return self.get_backend(vc_type)
return None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
# List of supported schemes for this Version Control
schemes = ()
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def _is_local_repository(self, repo):
"""
posix absolute paths start with os.path.sep,
win32 ones start with a drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or drive
# See issue #1083 for why this method was introduced:
# https://github.com/pypa/pip/issues/1083
def translate_egg_surname(self, surname):
# For example, Django has branches of the form "stable/1.7.x".
return surname.replace('/', '_')
def export(self, location):
"""
Export the repository at the url to the destination location
i.e. only download the files, without vcs informations
"""
raise NotImplementedError
def get_url_rev(self):
"""
Returns the correct repository URL and revision by parsing the given
repository URL
"""
error_message = (
"Sorry, '%s' is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
)
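# e.g. self.url = 'git+https://github.com/pypa/pip.git@1.5.6' yields
# url = 'https://github.com/pypa/pip.git' and rev = '1.5.6'.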
assert '+' in self.url, error_message % self.url
url = self.url.split('+', 1)[1]
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def get_info(self, location):
"""
Returns (url, revision), where both are strings
"""
assert not location.rstrip('/').endswith(self.dirname), \
'Bad directory: %s' % location
return self.get_url(location), self.get_revision(location)
def normalize_url(self, url):
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def obtain(self, dest):
"""
Called when installing or updating an editable package, takes the
source path of the checkout.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
"""
raise NotImplementedError
def update(self, dest, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
"""
raise NotImplementedError
def check_version(self, dest, rev_options):
"""
Return True if the version is identical to what exists and
doesn't need to be updated.
"""
raise NotImplementedError
def check_destination(self, dest, url, rev_options, rev_display):
"""
Prepare a location to receive a checkout/clone.
Return True if the location is ready for (and requires) a
checkout/clone, False otherwise.
"""
checkout = True
prompt = False
if os.path.exists(dest):
checkout = False
if os.path.exists(os.path.join(dest, self.dirname)):
existing_url = self.get_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.check_version(dest, rev_options):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, rev_options)
else:
logger.info(
'Skipping because already up-to-date.')
else:
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
if prompt:
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0],
prompt[1])
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
elif response == 'i':
# do nothing
pass
elif response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
checkout = True
elif response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
checkout = True
return checkout
def unpack(self, location):
"""
Clean up the current location and download the url repository
(and vcs info) into location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
def get_src_requirement(self, dist, location):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
def get_url(self, location):
"""
Return the url used at location
Used in get_info or check_destination
"""
raise NotImplementedError
def get_revision(self, location):
"""
Return the current revision of the files at location
Used in get_info
"""
raise NotImplementedError
def run_command(self, cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_level=logging.DEBUG, command_desc=None,
extra_environ=None, spinner=None):
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [self.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode, command_level,
command_desc, extra_environ,
spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand('Cannot find command %r' % self.name)
else:
raise # re-raise exception if a different error occurred
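# Illustrative sketch (hypothetical subclass code): a backend with
# name = 'git' could clone a repository with
#     self.run_command(['clone', '-q', url, dest])
# which executes `git clone -q <url> <dest>`, because the VCS name is
# prepended to cmd above.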
@classmethod
def controls_location(cls, location):
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
"""
logger.debug('Checking in %s for %s (%s)...',
location, cls.dirname, cls.name)
path = os.path.join(location, cls.dirname)
return os.path.exists(path)
def get_src_requirement(dist, location):
version_control = vcs.get_backend_from_location(location)
if version_control:
try:
return version_control().get_src_requirement(dist,
location)
except BadCommand:
logger.warning(
'cannot determine version of editable source in %s '
'(%s command not found in path)',
location,
version_control.name,
)
return dist.as_requirement()
logger.warning(
'cannot determine version of editable source in %s (is not SVN '
'checkout, Git clone, Mercurial clone or Bazaar branch)',
location,
)
return dist.as_requirement()
|
morissette/devopsdays-hackathon-2016 | refs/heads/master | venv/lib/python2.7/site-packages/pip/vcs/__init__.py | 170 | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.exceptions import BadCommand
from pip.utils import (display_path, backup_dir, call_subprocess,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
logger = logging.getLogger(__name__)
class VcsSupport(object):
_registry = {}
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
return list(self._registry.values())
@property
def dirnames(self):
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
schemes = []
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
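# Illustrative example (comment only): concrete backends register
# themselves on import, e.g. pip/vcs/git.py ends with
#     vcs.register(Git)
# so importing that module makes the 'git' backend available here.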
def unregister(self, cls=None, name=None):
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_name(self, location):
"""
Return the name of the version control backend if found at given
location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
logger.debug('Determine that %s uses VCS: %s',
location, vc_type.name)
return vc_type.name
return None
def get_backend(self, name):
name = name.lower()
if name in self._registry:
return self._registry[name]
def get_backend_from_location(self, location):
vc_type = self.get_backend_name(location)
if vc_type:
return self.get_backend(vc_type)
return None
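# Illustrative example (comment only): for a checkout containing a
# '.git' subdirectory, get_backend_from_location(path) returns the
# registered Git backend class; it returns None when no backend's
# dirname is found at the location.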
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
# List of supported schemes for this Version Control
schemes = ()
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def _is_local_repository(self, repo):
"""
posix absolute paths start with os.path.sep,
win32 ones start with a drive letter (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or drive
# See issue #1083 for why this method was introduced:
# https://github.com/pypa/pip/issues/1083
def translate_egg_surname(self, surname):
# For example, Django has branches of the form "stable/1.7.x".
return surname.replace('/', '_')
def export(self, location):
"""
Export the repository at the url to the destination location
i.e. only download the files, without vcs information
"""
raise NotImplementedError
def get_url_rev(self):
"""
Returns the correct repository URL and revision by parsing the given
repository URL
"""
error_message = (
"Sorry, '%s' is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
)
assert '+' in self.url, error_message % self.url
url = self.url.split('+', 1)[1]
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def get_info(self, location):
"""
Returns (url, revision), where both are strings
"""
assert not location.rstrip('/').endswith(self.dirname), \
'Bad directory: %s' % location
return self.get_url(location), self.get_revision(location)
def normalize_url(self, url):
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def obtain(self, dest):
"""
Called when installing or updating an editable package, takes the
source path of the checkout.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
"""
raise NotImplementedError
def update(self, dest, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
"""
raise NotImplementedError
def check_version(self, dest, rev_options):
"""
Return True if the version is identical to what exists and
doesn't need to be updated.
"""
raise NotImplementedError
def check_destination(self, dest, url, rev_options, rev_display):
"""
Prepare a location to receive a checkout/clone.
Return True if the location is ready for (and requires) a
checkout/clone, False otherwise.
"""
checkout = True
prompt = False
if os.path.exists(dest):
checkout = False
if os.path.exists(os.path.join(dest, self.dirname)):
existing_url = self.get_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.check_version(dest, rev_options):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, rev_options)
else:
logger.info(
'Skipping because already up-to-date.')
else:
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
if prompt:
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0],
prompt[1])
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
elif response == 'i':
# do nothing
pass
elif response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
checkout = True
elif response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
checkout = True
return checkout
def unpack(self, location):
"""
Clean up the current location and download the url repository
(and vcs info) into location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
def get_src_requirement(self, dist, location):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
def get_url(self, location):
"""
Return the url used at location
Used in get_info or check_destination
"""
raise NotImplementedError
def get_revision(self, location):
"""
Return the current revision of the files at location
Used in get_info
"""
raise NotImplementedError
def run_command(self, cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_level=logging.DEBUG, command_desc=None,
extra_environ=None, spinner=None):
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [self.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode, command_level,
command_desc, extra_environ,
spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand('Cannot find command %r' % self.name)
else:
raise # re-raise exception if a different error occurred
@classmethod
def controls_location(cls, location):
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
"""
logger.debug('Checking in %s for %s (%s)...',
location, cls.dirname, cls.name)
path = os.path.join(location, cls.dirname)
return os.path.exists(path)
def get_src_requirement(dist, location):
version_control = vcs.get_backend_from_location(location)
if version_control:
try:
return version_control().get_src_requirement(dist,
location)
except BadCommand:
logger.warning(
'cannot determine version of editable source in %s '
'(%s command not found in path)',
location,
version_control.name,
)
return dist.as_requirement()
logger.warning(
'cannot determine version of editable source in %s (is not SVN '
'checkout, Git clone, Mercurial clone or Bazaar branch)',
location,
)
return dist.as_requirement()
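# Minimal sketch of a custom backend (hypothetical: assumes a `fakevcs`
# executable with `url` and `revision` subcommands; real backends
# override many more methods, e.g. obtain/switch/update).
class FakeVcs(VersionControl):
    name = 'fakevcs'
    dirname = '.fakevcs'
    repo_name = 'checkout'
    schemes = ('fakevcs', 'fakevcs+https')

    def get_url(self, location):
        # Ask the (hypothetical) tool for the remote URL of `location`.
        return self.run_command(['url'], show_stdout=False,
                                cwd=location).strip()

    def get_revision(self, location):
        # Ask the (hypothetical) tool for the current revision id.
        return self.run_command(['revision'], show_stdout=False,
                                cwd=location).strip()

# Registering would make it discoverable: vcs.register(FakeVcs)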
|
nikolay-fedotov/networking-cisco | refs/heads/master | networking_cisco/tests/unit/ml2/drivers/cisco/nexus/__init__.py | 12133432 | |
CollabQ/CollabQ | refs/heads/master | .google_appengine/lib/django/django/contrib/flatpages/__init__.py | 12133432 | |
shail2810/nova | refs/heads/master | nova/tests/unit/servicegroup/__init__.py | 12133432 | |
zachallaun/zulip | refs/heads/master | confirmation/migrations/__init__.py | 12133432 | |
silentsokolov/django-treasuremap | refs/heads/master | treasuremap/backends/yandex.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from django.conf import settings
from .base import BaseMapBackend
class YandexMapBackend(BaseMapBackend):
NAME = 'yandex'
API_URL = '//api-maps.yandex.ru/2.1/'
def get_api_js(self):
params = OrderedDict()
params['lang'] = settings.LANGUAGE_CODE
if self.API_KEY:
params['apikey'] = self.API_KEY
return '{js_lib}?{params}'.format(js_lib=self.API_URL, params=urlencode(params))
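# Illustrative example (comment only): with settings.LANGUAGE_CODE set
# to 'en-us' and no API key configured, get_api_js() returns
# '//api-maps.yandex.ru/2.1/?lang=en-us'.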
|
mmalyska/eve-wspace | refs/heads/develop | evewspace/Teamspeak/__init__.py | 12133432 | |
mcella/django | refs/heads/master | tests/user_commands/management/commands/leave_locale_alone_true.py | 428 | from django.core.management.base import BaseCommand
from django.utils import translation
class Command(BaseCommand):
can_import_settings = True
leave_locale_alone = True
def handle(self, *args, **options):
return translation.get_language()
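# Illustrative usage (comment only):
#     management.call_command('leave_locale_alone_true')
# returns the active language code (e.g. 'en-us'), since the locale is
# left untouched while the command runs.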
|
yosefm/postptv | refs/heads/master | scripts/analyse_fhdf.py | 2 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Runs an analysis of a tracer/inertial scene encoded by two FHDF files.
Created on Sun Aug 10 17:22:04 2014
@author: yosef
"""
from flowtracks.scene import read_dual_scene
from flowtracks.analysis import analysis, FluidVelocitiesAnalyser
if __name__ == "__main__":
from flowtracks.interpolation import interpolant
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', help="file containing configuration values.")
parser.add_argument('output', help="A directory where the generated data "\
"should be saved as one file per trajectory.")
parser.add_argument("--method", choices=['inv', 'rbf', 'corrfun'],
default='inv', help="Interpolation method (*inv*, rbf, corrfun)")
parser.add_argument("--neighbs", type=int, help="Number of closest "
"neighbours from which to interpolate.", default=4)
parser.add_argument('--param', '-p', help="Interpolation adjustment "
"parameter. Inverse power for inv, epsilon for RBF, filename for corrfun")
args = parser.parse_args()
interp = interpolant(args.method, args.neighbs, args.param)
scene = read_dual_scene(args.config)
analysers = [ FluidVelocitiesAnalyser(interp) ]
analysis(scene, args.output, args.config, analysers)
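# Illustrative invocation (hypothetical file names):
#     python analyse_fhdf.py scene_config.ini results/ --method rbf --neighbs 6 -p 1e-5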
|
benpatterson/edx-platform | refs/heads/master | lms/djangoapps/courseware/tests/test_model_data.py | 43 | """
Test for lms courseware app, module data (runtime data storage for XBlocks)
"""
import json
from mock import Mock, patch
from nose.plugins.attrib import attr
from functools import partial
from courseware.model_data import DjangoKeyValueStore, FieldDataCache, InvalidScopeError
from courseware.models import StudentModule
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.tests.factories import UserFactory
from courseware.tests.factories import StudentModuleFactory as cmfStudentModuleFactory, location, course_id
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory
from xblock.fields import Scope, BlockScope, ScopeIds
from xblock.exceptions import KeyValueMultiSaveError
from xblock.core import XBlock
from django.test import TestCase
from django.db import DatabaseError
def mock_field(scope, name):
field = Mock()
field.scope = scope
field.name = name
return field
def mock_descriptor(fields=[]):
descriptor = Mock(entry_point=XBlock.entry_point)
descriptor.scope_ids = ScopeIds('user1', 'mock_problem', location('def_id'), location('usage_id'))
descriptor.module_class.fields.values.return_value = fields
descriptor.fields.values.return_value = fields
descriptor.module_class.__name__ = 'MockProblemModule'
return descriptor
# The user ids here are 1 because we make a student in the setUp functions, and
# they get an id of 1. There's an assertion in setUp to ensure that assumption
# is still true.
user_state_summary_key = partial(DjangoKeyValueStore.Key, Scope.user_state_summary, None, location('usage_id'))
settings_key = partial(DjangoKeyValueStore.Key, Scope.settings, None, location('usage_id'))
user_state_key = partial(DjangoKeyValueStore.Key, Scope.user_state, 1, location('usage_id'))
prefs_key = partial(DjangoKeyValueStore.Key, Scope.preferences, 1, 'mock_problem')
user_info_key = partial(DjangoKeyValueStore.Key, Scope.user_info, 1, None)
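# Illustrative example (comment only): user_state_key('a_field') builds
# DjangoKeyValueStore.Key(Scope.user_state, 1, location('usage_id'),
# 'a_field'), i.e. a key for user 1's state on the mocked usage id.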
class StudentModuleFactory(cmfStudentModuleFactory):
module_state_key = location('usage_id')
course_id = course_id
@attr('shard_1')
class TestInvalidScopes(TestCase):
def setUp(self):
super(TestInvalidScopes, self).setUp()
self.user = UserFactory.create(username='user')
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_invalid_scopes(self):
for scope in (Scope(user=True, block=BlockScope.DEFINITION),
Scope(user=False, block=BlockScope.TYPE),
Scope(user=False, block=BlockScope.ALL)):
key = DjangoKeyValueStore.Key(scope, None, None, 'field')
self.assertRaises(InvalidScopeError, self.kvs.get, key)
self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, key)
self.assertRaises(InvalidScopeError, self.kvs.has, key)
self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
@attr('shard_1')
class OtherUserFailureTestMixin(object):
"""
Mixin class to add test cases for failures when a user trying to use the kvs is not
the one that instantiated the kvs.
This is a mixin, rather than a modification of StorageTestBase (below),
because some scopes don't fail in this case, since they aren't bound to
a particular user.
Assumes that this is mixed into a class that defines other_key_factory
and existing_field_name.
"""
def test_other_user_kvs_get_failure(self):
"""
Test for assert failure when a user who didn't create the kvs tries to get from it
"""
with self.assertRaises(AssertionError):
self.kvs.get(self.other_key_factory(self.existing_field_name))
def test_other_user_kvs_set_failure(self):
"""
Test for assert failure when a user who didn't create the kvs tries to set a value in it
"""
with self.assertRaises(AssertionError):
self.kvs.set(self.other_key_factory(self.existing_field_name), "new_value")
@attr('shard_1')
class TestStudentModuleStorage(OtherUserFailureTestMixin, TestCase):
"""Tests for user_state storage via StudentModule"""
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.user_state, 2, location('usage_id')) # user_id=2, not 1
existing_field_name = "a_field"
def setUp(self):
super(TestStudentModuleStorage, self).setUp()
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'}))
self.user = student_module.student
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
# There should be only one query to load a single descriptor with a single user_state field
with self.assertNumQueries(1):
self.field_data_cache = FieldDataCache(
[mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user
)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_existing_field(self):
"Test that getting an existing field in an existing StudentModule works"
# This should only read from the cache, not the database
with self.assertNumQueries(0):
self.assertEquals('a_value', self.kvs.get(user_state_key('a_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing StudentModule raises a KeyError"
# This should only read from the cache, not the database
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_set_existing_field(self):
"Test that setting an existing user_state field changes the value"
# We are updating a problem, so we write to courseware_studentmodulehistory
# as well as courseware_studentmodule. We also need to read the database
# to discover if something other than the DjangoXBlockUserStateClient
# has written to the StudentModule (such as UserStateCache setting the score
# on the StudentModule).
with self.assertNumQueries(3):
self.kvs.set(user_state_key('a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_set_missing_field(self):
"Test that setting a new user_state field changes the value"
# We are updating a problem, so we write to courseware_studentmodulehistory
# as well as courseware_studentmodule. We also need to read the database
# to discover if something other than the DjangoXBlockUserStateClient
# has written to the StudentModule (such as UserStateCache setting the score
# on the StudentModule).
with self.assertNumQueries(3):
self.kvs.set(user_state_key('not_a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it from the StudentModule"
# We are updating a problem, so we write to courseware_studentmodulehistory
# as well as courseware_studentmodule. We also need to read the database
# to discover if something other than the DjangoXBlockUserStateClient
# has written to the StudentModule (such as UserStateCache setting the score
# on the StudentModule).
with self.assertNumQueries(3):
self.kvs.delete(user_state_key('a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing StudentModule raises a KeyError"
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_has_existing_field(self):
"Test that `has` returns True for existing fields in StudentModules"
with self.assertNumQueries(0):
self.assertTrue(self.kvs.has(user_state_key('a_field')))
def test_has_missing_field(self):
"Test that `has` returns False for missing fields in StudentModule"
with self.assertNumQueries(0):
self.assertFalse(self.kvs.has(user_state_key('not_a_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = user_state_key('field_a')
key2 = user_state_key('field_b')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
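# Illustrative example (comment only): construct_kv_dict() returns
# {user_state_key('field_a'): 'new value',
#  user_state_key('field_b'): 'newer value'}.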
def test_set_many(self):
"Test setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# Scope.user_state is stored in a single row in the database, so we only
# need to send a single update to that table.
# We also are updating a problem, so we write to courseware student module history
# We also need to read the database to discover if something other than the
# DjangoXBlockUserStateClient has written to the StudentModule (such as
# UserStateCache setting the score on the StudentModule).
with self.assertNumQueries(3):
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"Test failures when setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# because we're patching the underlying save, we need to ensure the
# fields are in the cache
for key in kv_dict:
self.kvs.set(key, 'test_value')
with patch('django.db.models.Model.save', side_effect=DatabaseError):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
self.assertEquals(exception_context.exception.saved_field_names, [])
@attr('shard_1')
class TestMissingStudentModule(TestCase):
def setUp(self):
super(TestMissingStudentModule, self).setUp()
self.user = UserFactory.create(username='user')
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
# The descriptor has no fields, so FDC shouldn't send any queries
with self.assertNumQueries(0):
self.field_data_cache = FieldDataCache([mock_descriptor()], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_field_from_missing_student_module(self):
"Test that getting a field from a missing StudentModule raises a KeyError"
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.get, user_state_key('a_field'))
def test_set_field_in_missing_student_module(self):
"Test that setting a field in a missing StudentModule creates the student module"
self.assertEquals(0, len(self.field_data_cache))
self.assertEquals(0, StudentModule.objects.all().count())
# We are updating a problem, so we write to courseware_studentmodulehistory
# as well as courseware_studentmodule. We also need to read the database
# to discover if something other than the DjangoXBlockUserStateClient
# has written to the StudentModule (such as UserStateCache setting the score
# on the StudentModule).
with self.assertNumQueries(3):
self.kvs.set(user_state_key('a_field'), 'a_value')
self.assertEquals(1, sum(len(cache) for cache in self.field_data_cache.cache.values()))
self.assertEquals(1, StudentModule.objects.all().count())
student_module = StudentModule.objects.all()[0]
self.assertEquals({'a_field': 'a_value'}, json.loads(student_module.state))
self.assertEquals(self.user, student_module.student)
self.assertEquals(location('usage_id').replace(run=None), student_module.module_state_key)
self.assertEquals(course_id, student_module.course_id)
def test_delete_field_from_missing_student_module(self):
"Test that deleting a field from a missing StudentModule raises a KeyError"
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.delete, user_state_key('a_field'))
def test_has_field_for_missing_student_module(self):
"Test that `has` returns False for missing StudentModules"
with self.assertNumQueries(0):
self.assertFalse(self.kvs.has(user_state_key('a_field')))
@attr('shard_1')
class StorageTestBase(object):
"""
A base class for that gets subclassed when testing each of the scopes.
"""
# Disable pylint warnings that arise because of the way the child classes call
# this base class -- pylint's static analysis can't keep up with it.
# pylint: disable=no-member, not-callable
factory = None
scope = None
key_factory = None
storage_class = None
def setUp(self):
field_storage = self.factory.create()
if hasattr(field_storage, 'student'):
self.user = field_storage.student
else:
self.user = UserFactory.create()
self.mock_descriptor = mock_descriptor([
mock_field(self.scope, 'existing_field'),
mock_field(self.scope, 'other_existing_field')])
# Each field is stored as a separate row in the table,
# but we can query them in a single query
with self.assertNumQueries(1):
self.field_data_cache = FieldDataCache([self.mock_descriptor], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_set_and_get_existing_field(self):
with self.assertNumQueries(1):
self.kvs.set(self.key_factory('existing_field'), 'test_value')
with self.assertNumQueries(0):
self.assertEquals('test_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_existing_field(self):
"Test that getting an existing field in an existing Storage Field works"
with self.assertNumQueries(0):
self.assertEquals('old_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing Storage Field raises a KeyError"
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.get, self.key_factory('missing_field'))
def test_set_existing_field(self):
"Test that setting an existing field changes the value"
with self.assertNumQueries(1):
self.kvs.set(self.key_factory('existing_field'), 'new_value')
self.assertEquals(1, self.storage_class.objects.all().count())
self.assertEquals('new_value', json.loads(self.storage_class.objects.all()[0].value))
def test_set_missing_field(self):
"Test that setting a new field changes the value"
with self.assertNumQueries(1):
self.kvs.set(self.key_factory('missing_field'), 'new_value')
self.assertEquals(2, self.storage_class.objects.all().count())
self.assertEquals('old_value', json.loads(self.storage_class.objects.get(field_name='existing_field').value))
self.assertEquals('new_value', json.loads(self.storage_class.objects.get(field_name='missing_field').value))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it"
with self.assertNumQueries(1):
self.kvs.delete(self.key_factory('existing_field'))
self.assertEquals(0, self.storage_class.objects.all().count())
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing Storage Field raises a KeyError"
with self.assertNumQueries(0):
self.assertRaises(KeyError, self.kvs.delete, self.key_factory('missing_field'))
self.assertEquals(1, self.storage_class.objects.all().count())
def test_has_existing_field(self):
"Test that `has` returns True for an existing Storage Field"
with self.assertNumQueries(0):
self.assertTrue(self.kvs.has(self.key_factory('existing_field')))
def test_has_missing_field(self):
"Test that `has` return False for an existing Storage Field"
with self.assertNumQueries(0):
self.assertFalse(self.kvs.has(self.key_factory('missing_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = self.key_factory('existing_field')
key2 = self.key_factory('other_existing_field')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"""Test that setting many regular fields at the same time works"""
kv_dict = self.construct_kv_dict()
# Each field is a separate row in the database, hence
# a separate query
with self.assertNumQueries(len(kv_dict)):
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"""Test that setting many regular fields with a DB error """
kv_dict = self.construct_kv_dict()
for key in kv_dict:
with self.assertNumQueries(1):
self.kvs.set(key, 'test value')
with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
exception = exception_context.exception
self.assertEquals(exception.saved_field_names, ['existing_field', 'other_existing_field'])
class TestUserStateSummaryStorage(StorageTestBase, TestCase):
"""Tests for UserStateSummaryStorage"""
factory = UserStateSummaryFactory
scope = Scope.user_state_summary
key_factory = user_state_summary_key
storage_class = factory.FACTORY_FOR
class TestStudentPrefsStorage(OtherUserFailureTestMixin, StorageTestBase, TestCase):
"""Tests for StudentPrefStorage"""
factory = StudentPrefsFactory
scope = Scope.preferences
key_factory = prefs_key
storage_class = XModuleStudentPrefsField
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.preferences, 2, 'mock_problem') # user_id=2, not 1
existing_field_name = "existing_field"
class TestStudentInfoStorage(OtherUserFailureTestMixin, StorageTestBase, TestCase):
"""Tests for StudentInfoStorage"""
factory = StudentInfoFactory
scope = Scope.user_info
key_factory = user_info_key
storage_class = XModuleStudentInfoField
other_key_factory = partial(DjangoKeyValueStore.Key, Scope.user_info, 2, 'mock_problem') # user_id=2, not 1
existing_field_name = "existing_field"
|
eyohansa/django | refs/heads/master | tests/i18n/urls.py | 205 | from __future__ import unicode_literals
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse, StreamingHttpResponse
from django.test import ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.translation import ugettext_lazy as _
# test deprecated version of i18n_patterns() function (with prefix). Remove it
# and convert to list of urls() in Django 1.10
i18n_patterns = ignore_warnings(category=RemovedInDjango110Warning)(i18n_patterns)
urlpatterns = i18n_patterns('',
(r'^simple/$', lambda r: HttpResponse()),
(r'^streaming/$', lambda r: StreamingHttpResponse([_("Yes"), "/", _("No")])),
)
|
linjoahow/w17test_1 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_break.py | 785 | import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect(); gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
|
RockySteveJobs/python-for-android | refs/heads/master | python-build/python-libs/gdata/build/lib/atom/http_interface.py | 133 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a common interface for all HTTP requests.
HttpResponse: Represents the server's response to an HTTP request. Provides
an interface identical to httplib.HTTPResponse which is the response
expected from higher level classes which use HttpClient.request.
GenericHttpClient: Provides an interface (superclass) for an object
responsible for making HTTP requests. Subclasses of this object are
used in AtomService and GDataService to make requests to the server. By
changing the http_client member object, the AtomService is able to make
HTTP requests using different logic (for example, when running on
Google App Engine, the http_client makes requests using the App Engine
urlfetch API).
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
USER_AGENT = '%s GData-Python/1.3.3'
class Error(Exception):
pass
class UnparsableUrlObject(Error):
pass
class ContentLengthRequired(Error):
pass
class HttpResponse(object):
def __init__(self, body=None, status=None, reason=None, headers=None):
"""Constructor for an HttpResponse object.
HttpResponse represents the server's response to an HTTP request from
the client. The HttpClient.request method returns a httplib.HTTPResponse
object and this HttpResponse class is designed to mirror the interface
exposed by httplib.HTTPResponse.
Args:
body: A file like object, with a read() method. The body could also
be a string, and the constructor will wrap it so that
HttpResponse.read(self) will return the full string.
status: The HTTP status code as an int. Example: 200, 201, 404.
reason: The HTTP status message which follows the code. Example:
OK, Created, Not Found
headers: A dictionary containing the HTTP headers in the server's
response. A common header in the response is Content-Length.
"""
if body:
if hasattr(body, 'read'):
self._body = body
else:
self._body = StringIO.StringIO(body)
else:
self._body = None
if status is not None:
self.status = int(status)
else:
self.status = None
self.reason = reason
self._headers = headers or {}
def getheader(self, name, default=None):
if name in self._headers:
return self._headers[name]
else:
return default
def read(self, amt=None):
if not amt:
return self._body.read()
else:
return self._body.read(amt)
class GenericHttpClient(object):
debug = False
def __init__(self, http_client, headers=None):
"""
Args:
http_client: An object which provides a request method to make an HTTP
request. The request method in GenericHttpClient performs a
call-through to the contained HTTP client object.
headers: A dictionary containing HTTP headers which should be included
in every HTTP request. Common persistent headers include
'User-Agent'.
"""
self.http_client = http_client
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
return self.http_client.request(operation, url, data=data,
headers=all_headers)
def get(self, url, headers=None):
return self.request('GET', url, headers=headers)
def post(self, url, data, headers=None):
return self.request('POST', url, data=data, headers=headers)
def put(self, url, data, headers=None):
return self.request('PUT', url, data=data, headers=headers)
def delete(self, url, headers=None):
return self.request('DELETE', url, headers=headers)
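# Illustrative usage (hypothetical `raw_client` object exposing a
# compatible request(operation, url, data, headers) method):
#     client = GenericHttpClient(raw_client,
#                                headers={'User-Agent': USER_AGENT % 'demo'})
#     response = client.get('http://example.com/feed')
# The persistent headers are merged into every request made above.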
class GenericToken(object):
"""Represents an Authorization token to be added to HTTP requests.
Some Authorization headers included calculated fields (digital
signatures for example) which are based on the parameters of the HTTP
request. Therefore the token is responsible for signing the request
and adding the Authorization header.
"""
def perform_request(self, http_client, operation, url, data=None,
headers=None):
"""For the GenericToken, no Authorization token is set."""
return http_client.request(operation, url, data=data, headers=headers)
def valid_for_scope(self, url):
"""Tells the caller if the token authorizes access to the desired URL.
Since the generic token doesn't add an auth header, it is not valid for
any scope.
"""
return False
|
sebrandon1/neutron | refs/heads/master | neutron/agent/l3/ha_router.py | 3 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import netaddr
from neutron_lib import constants as n_consts
from oslo_log import log as logging
from neutron._i18n import _LE
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import utils as common_utils
from neutron.extensions import portbindings
LOG = logging.getLogger(__name__)
HA_DEV_PREFIX = 'ha-'
IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
class HaRouter(router.RouterInfo):
def __init__(self, state_change_callback, *args, **kwargs):
super(HaRouter, self).__init__(*args, **kwargs)
self.ha_port = None
self.keepalived_manager = None
self.state_change_callback = state_change_callback
@property
def ha_priority(self):
return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)
@property
def ha_vr_id(self):
return self.router.get('ha_vr_id')
@property
def ha_state(self):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'r') as f:
return f.read()
except (OSError, IOError):
LOG.debug('Error while reading HA state for %s', self.router_id)
return None
@ha_state.setter
def ha_state(self, new_state):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError):
LOG.error(_LE('Error while writing HA state for %s'),
self.router_id)
@property
def ha_namespace(self):
return self.ns_name
def initialize(self, process_monitor):
super(HaRouter, self).initialize(process_monitor)
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if not ha_port:
LOG.error(_LE('Unable to process HA router %s without HA port'),
self.router_id)
return
self.ha_port = ha_port
self._init_keepalived_manager(process_monitor)
self.ha_network_added()
self.update_initial_state(self.state_change_callback)
self.spawn_state_change_monitor(process_monitor)
def _init_keepalived_manager(self, process_monitor):
self.keepalived_manager = keepalived.KeepalivedManager(
self.router['id'],
keepalived.KeepalivedConf(),
process_monitor,
conf_path=self.agent_conf.ha_confs_path,
namespace=self.ha_namespace)
config = self.keepalived_manager.config
interface_name = self.get_ha_device_name()
subnets = self.ha_port.get('subnets', [])
ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
instance = keepalived.KeepalivedInstance(
'BACKUP',
interface_name,
self.ha_vr_id,
ha_port_cidrs,
nopreempt=True,
advert_int=self.agent_conf.ha_vrrp_advert_int,
priority=self.ha_priority)
instance.track_interfaces.append(interface_name)
if self.agent_conf.ha_vrrp_auth_password:
# TODO(safchain): use oslo.config types when it will be available
# in order to check the validity of ha_vrrp_auth_type
instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
self.agent_conf.ha_vrrp_auth_password)
config.add_instance(instance)
def enable_keepalived(self):
self.keepalived_manager.spawn()
def disable_keepalived(self):
self.keepalived_manager.disable()
conf_dir = self.keepalived_manager.get_conf_dir()
shutil.rmtree(conf_dir)
def _get_keepalived_instance(self):
return self.keepalived_manager.config.get_instance(self.ha_vr_id)
def _get_primary_vip(self):
return self._get_keepalived_instance().get_primary_vip()
def get_ha_device_name(self):
return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]
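# Illustrative example (comment only): for an ha_port id starting with
# 'f3a1c2d4e5f6' and a driver whose DEV_NAME_LEN is 14, this returns
# the device name 'ha-f3a1c2d4e5f'.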
def ha_network_added(self):
interface_name = self.get_ha_device_name()
self.driver.plug(self.ha_port['network_id'],
self.ha_port['id'],
interface_name,
self.ha_port['mac_address'],
namespace=self.ha_namespace,
prefix=HA_DEV_PREFIX,
mtu=self.ha_port.get('mtu'))
ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs,
namespace=self.ha_namespace,
preserve_ips=[self._get_primary_vip()])
def ha_network_removed(self):
self.driver.unplug(self.get_ha_device_name(),
namespace=self.ha_namespace,
prefix=HA_DEV_PREFIX)
self.ha_port = None
def _add_vips(self, port, interface_name):
for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
def _add_vip(self, ip_cidr, interface, scope=None):
instance = self._get_keepalived_instance()
instance.add_vip(ip_cidr, interface, scope)
def _remove_vip(self, ip_cidr):
instance = self._get_keepalived_instance()
instance.remove_vip_by_ip_address(ip_cidr)
def _clear_vips(self, interface):
instance = self._get_keepalived_instance()
instance.remove_vips_vroutes_by_interface(interface)
def _get_cidrs_from_keepalived(self, interface_name):
instance = self._get_keepalived_instance()
return instance.get_existing_vip_ip_addresses(interface_name)
def get_router_cidrs(self, device):
return set(self._get_cidrs_from_keepalived(device.name))
def routes_updated(self, old_routes, new_routes):
instance = self._get_keepalived_instance()
instance.virtual_routes.extra_routes = [
keepalived.KeepalivedVirtualRoute(
route['destination'], route['nexthop'])
for route in new_routes]
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
gateway_ips = self._get_external_gw_ips(ex_gw_port)
default_gw_rts = []
instance = self._get_keepalived_instance()
for gw_ip in gateway_ips:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version]
default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts
def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
instance.virtual_routes.extra_subnets = [
keepalived.KeepalivedVirtualRoute(
onlink_route_cidr, None, interface_name, scope='link') for
onlink_route_cidr in onlink_route_cidrs]
def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
"""Only the master should have any IP addresses configured.
Let keepalived manage IPv6 link local addresses, the same way we let
it manage IPv4 addresses. If the router is not in the master state,
we must delete the address first as it is autoconfigured by the kernel.
"""
manager = self.keepalived_manager
if manager.get_process().active:
if self.ha_state != 'master':
conf = manager.get_conf_on_disk()
managed_by_keepalived = conf and ipv6_lladdr in conf
if managed_by_keepalived:
return False
else:
return False
return True
def _disable_ipv6_addressing_on_interface(self, interface_name):
"""Disable IPv6 link local addressing on the device and add it as
a VIP to keepalived. This means that the IPv6 link local address
will only be present on the master.
"""
device = ip_lib.IPDevice(interface_name, namespace=self.ha_namespace)
ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
if self._should_delete_ipv6_lladdr(ipv6_lladdr):
device.addr.flush(n_consts.IP_VERSION_6)
self._remove_vip(ipv6_lladdr)
self._add_vip(ipv6_lladdr, interface_name, scope='link')
def _add_gateway_vip(self, ex_gw_port, interface_name):
self._add_vips(ex_gw_port, interface_name)
self._add_default_gw_virtual_route(ex_gw_port, interface_name)
self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name)
def add_floating_ip(self, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
self._add_vip(ip_cidr, interface_name)
return n_consts.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
self._remove_vip(ip_cidr)
if self.ha_state == 'master' and device.addr.list(to=ip_cidr):
# Delete the floatingip address from external port only after
# the ip address has been configured to the device
super(HaRouter, self).remove_floating_ip(device, ip_cidr)
def internal_network_updated(self, interface_name, ip_cidrs):
self._clear_vips(interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
for ip_cidr in ip_cidrs:
self._add_vip(ip_cidr, interface_name)
def _plug_ha_router_port(self, port, name_getter, prefix):
port_id = port['id']
interface_name = name_getter(port_id)
self.driver.plug(port['network_id'],
port_id,
interface_name,
port['mac_address'],
namespace=self.ha_namespace,
prefix=prefix,
mtu=port.get('mtu'))
self._disable_ipv6_addressing_on_interface(interface_name)
self._add_vips(port, interface_name)
def internal_network_added(self, port):
self._plug_ha_router_port(
port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
super(HaRouter, self).internal_network_removed(port)
interface_name = self.get_internal_device_name(port['id'])
self._clear_vips(interface_name)
def _get_state_change_monitor_process_manager(self):
return external_process.ProcessManager(
self.agent_conf,
'%s.monitor' % self.router_id,
self.ha_namespace,
default_cmd_callback=self._get_state_change_monitor_callback())
def _get_state_change_monitor_callback(self):
ha_device = self.get_ha_device_name()
ha_cidr = self._get_primary_vip()
def callback(pid_file):
cmd = [
'neutron-keepalived-state-change',
'--router_id=%s' % self.router_id,
'--namespace=%s' % self.ha_namespace,
'--conf_dir=%s' % self.keepalived_manager.get_conf_dir(),
'--monitor_interface=%s' % ha_device,
'--monitor_cidr=%s' % ha_cidr,
'--pid_file=%s' % pid_file,
'--state_path=%s' % self.agent_conf.state_path,
'--user=%s' % os.geteuid(),
'--group=%s' % os.getegid()]
return cmd
return callback
def spawn_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
pm.enable()
process_monitor.register(
self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
def destroy_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
process_monitor.unregister(
self.router_id, IP_MONITOR_PROCESS_SERVICE)
pm.disable()
def update_initial_state(self, callback):
ha_device = ip_lib.IPDevice(
self.get_ha_device_name(),
self.ha_namespace)
addresses = ha_device.addr.list()
cidrs = (address['cidr'] for address in addresses)
ha_cidr = self._get_primary_vip()
state = 'master' if ha_cidr in cidrs else 'backup'
self.ha_state = state
callback(self.router_id, state)
@staticmethod
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return {k: v for k, v in d.items() if k not in ignore}
keys_to_ignore = set([portbindings.HOST_ID])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
def external_gateway_added(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
self._add_gateway_vip(ex_gw_port, interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
if self.ha_state == 'master':
self._enable_ra_on_gw(ex_gw_port, self.ns_name, interface_name)
def external_gateway_updated(self, ex_gw_port, interface_name):
self._plug_external_gateway(
ex_gw_port, interface_name, self.ha_namespace)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
for old_gateway_cidr in ip_cidrs:
self._remove_vip(old_gateway_cidr)
self._add_gateway_vip(ex_gw_port, interface_name)
def external_gateway_removed(self, ex_gw_port, interface_name):
self._clear_vips(interface_name)
if self.ha_state == 'master':
super(HaRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
else:
# We are not the master node, so no need to delete ip addresses.
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=router.EXTERNAL_DEV_PREFIX)
def delete(self, agent):
super(HaRouter, self).delete(agent)
self.destroy_state_change_monitor(self.process_monitor)
self.ha_network_removed()
self.disable_keepalived()
def process(self, agent):
super(HaRouter, self).process(agent)
self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if (self.ha_port and
self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
self.enable_keepalived()
@common_utils.synchronized('enable_radvd')
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'master'):
super(HaRouter, self).enable_radvd(internal_ports)
|
darconny/liveblog | refs/heads/master | server/app.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import jinja2
from liveblog.embed import embed_blueprint
from flask.ext.cache import Cache
from liveblog.common import BlogCache
import flask_s3
import os
import settings
from superdesk.factory import get_app as superdesk_app
def get_app(config=None):
"""App factory.
:param config: configuration that can override config from `settings.py`
:return: a new SuperdeskEve app instance
"""
if config is None:
config = {}
config['APP_ABSPATH'] = os.path.abspath(os.path.dirname(__file__))
for key in dir(settings):
if key.isupper():
config.setdefault(key, getattr(settings, key))
media_storage = None
if config['AMAZON_CONTAINER_NAME']:
from superdesk.storage.amazon.amazon_media_storage import AmazonMediaStorage
media_storage = AmazonMediaStorage
config['DOMAIN'] = {}
app = superdesk_app(config, media_storage)
custom_loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader('superdesk/templates'),
app.jinja_loader
])
app.jinja_loader = custom_loader
# cache
app.cache = Cache(app, config={'CACHE_TYPE': 'simple'})
app.blog_cache = BlogCache(cache=app.cache)
# s3
s3 = flask_s3.FlaskS3()
s3.init_app(app)
# embed feature
app.register_blueprint(embed_blueprint)
return app
if __name__ == '__main__':
debug = True
host = '0.0.0.0'
port = int(os.environ.get('PORT', '5000'))
app = get_app()
app.run(host=host, port=port, debug=debug, use_reloader=debug)
|
tobegit3hub/keystone_docker | refs/heads/master | keystone/common/sql/migrate_repo/versions/058_placeholder.py | 70 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Juno backports. Do not use this number for new
# Kilo work. New Kilo work starts after all the placeholders.
def upgrade(migrate_engine):
pass
|
NeuralEnsemble/neuroConstruct | refs/heads/master | lib/jython/Lib/test/test_shutil.py | 14 | # Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats)
import tarfile
import warnings
from test import test_support
from test.test_support import TESTFN, check_warnings, captured_stdout
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
# XXX: Fails on Jython because Java resets the S_IREAD permission
# when removing the file
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)
and (not test_support.is_jython or os._name != 'nt')):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 400, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState == 0:
if func is os.remove:
self.assertEqual(arg, self.childpath)
else:
self.assertIs(func, os.listdir,
"func must be either os.remove or os.listdir")
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.dirname(dst_dir)
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_data(join(src_dir, 'test.txt'), '123')
write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
elif os.path.splitext(path)[-1] in ('.py',):  # ('.py',) is a tuple, not a string
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
if hasattr(os, "symlink"):
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
f = open(src, 'w')
f.write('cheddar')
f.close()
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
try:
shutil.rmtree(TESTFN)
except OSError:
pass
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
# skip when source and target are on different drives (a bare
# unittest.skipUnless() call would just discard the returned decorator)
if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
base_name = os.path.join(self.mkdtemp() , 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write("spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_main():
test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
test_main()
|
fernandog/Medusa | refs/heads/optimized | ext/feedparser/parsers/strict.py | 43 | # The strict feed parser that interfaces with an XML parsing library
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from ..exceptions import UndeclaredNamespace
class _StrictFeedParser(object):
def __init__(self, baseuri, baselang, encoding):
self.bozo = 0
self.exc = None
self.decls = {}
self.baseuri = baseuri or ''
self.lang = baselang
self.encoding = encoding
super(_StrictFeedParser, self).__init__()
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
return (k, v)
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, list(attrsD.items()))
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
|
pyKun/rally | refs/heads/master | rally/plugins/openstack/scenarios/tempest/__init__.py | 12133432 | |
shakamunyi/nova | refs/heads/master | nova/api/openstack/compute/plugins/v3/__init__.py | 12133432 | |
rotoudjimaye/django-jy | refs/heads/master | src/main/python/djangojy/db/backends/postgresql/__init__.py | 12133432 | |
sagarc/zookeeperGla | refs/heads/master | contrib/zkpython/src/test/connection_test.py | 26 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, threading
import zookeeper, zktestbase
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class ConnectionTest(zktestbase.TestBase):
"""Test whether we can make a connection"""
def setUp(self):
pass
def testconnection(self):
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle))
self.assertEqual(zookeeper.close(self.handle), zookeeper.OK)
# Trying to close the same handle twice is an error, and the C library will segfault on it
# so make sure this is caught at the Python module layer
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.close,
self.handle)
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.get,
self.handle,
"/")
def testhandlereuse(self):
"""
Test a) multiple concurrent connections b) reuse of closed handles
"""
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
handles = [ zookeeper.init(self.host) for i in xrange(10) ]
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(True, all( [ zookeeper.state(handle) == zookeeper.CONNECTED_STATE for handle in handles ] ),
"Not all connections succeeded")
oldhandle = handles[3]
zookeeper.close(oldhandle)
newhandle = zookeeper.init(self.host)
# This assertion tests *internal* behaviour; i.e. that the module
# correctly reuses closed handles. This is therefore implementation
# dependent.
self.assertEqual(newhandle, oldhandle, "Didn't get reused handle")
def testmanyhandles(self):
"""
Test the ability of the module to support many handles.
"""
# We'd like to do more, but currently the C client doesn't
# work with > 83 handles (fails to create a pipe) on MacOS 10.5.8
handles = [ zookeeper.init(self.host) for i in xrange(63) ]
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
for i,h in enumerate(handles):
path = "/zkpython-test-handles-%s" % str(i)
self.assertEqual(path, zookeeper.create(h, path, "", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL))
self.assertEqual(True, all( zookeeper.close(h) == zookeeper.OK for h in handles ))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
ryfeus/lambda-packs | refs/heads/master | Tensorflow_LightGBM_Scipy_nightly/source/numpy/lib/recfunctions.py | 10 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
Similar to dtype.descr, but the second item of each tuple is a dtype, not a
string. As a result, this handles subarray dtypes
Can be passed to the dtype constructor to reconstruct the dtype, noting that
this (deliberately) discards field offsets.
Examples
--------
>>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
>>> dt.descr
[(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
>>> get_fieldspec(dt)
[(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
# .descr returns a nameless field, so we should too
return [('', dtype)]
else:
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
(name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested
structures are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return (('', ndtype),)
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_dtype(seqarrays, flatten=False):
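"""
Combine the dtypes of a series of arrays into a single dtype.
Like `zip_descr`, but returns a np.dtype instead of a ``.descr`` list;
if `flatten` is True, nested field descriptions are collapsed.
"""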
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
if current.names and len(current.names) <= 1:
# special case - dtypes of 0 or 1 field are flattened
newdtype.extend(get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
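Examples
--------
A minimal illustrative sketch; the unnamed second field gets numpy's
default positional name, and the integer width shown is platform
dependent.
>>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(3, dtype=[('a', int)])
>>> b = np.zeros(3, dtype=float)
>>> rfn.zip_descr((a, b))
[('a', '<i4'), ('f1', '<f8')]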
"""
return zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False}, optional
Whether to collapse the nested fields of each record into a flat
sequence of values.
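Examples
--------
An illustrative sketch: with `flatten` left at its default, each yielded
tuple collapses the structured fields of every input record.
>>> from numpy.lib.recfunctions import izip_records
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.array([100, 200])
>>> list(izip_records((a, b)))
[(1, 10.0, 100), (2, 20.0, 200)]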
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
if sys.version_info[0] >= 3:
zip_longest = itertools.zip_longest
else:
zip_longest = itertools.izip_longest
for tup in zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on what its corresponding type:
* ``-1`` for integers
* ``-1.0`` for floating point numbers
* ``'-'`` for characters
* ``'-1'`` for strings
* ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
if not seqdtype.names:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
"""
Return a new array keeping only the fields in `keep_names`,
and preserving the order of those fields.
Parameters
----------
base : array
Input array
keep_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to keep. Order of the names will be preserved.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
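Examples
--------
An illustrative sketch (the integer width in the dtype repr is platform
dependent); note that the requested field order is preserved.
>>> from numpy.lib.recfunctions import _keep_fields
>>> a = np.array([(1, 2.0, 3)], dtype=[('a', int), ('b', float), ('c', int)])
>>> _keep_fields(a, ['c', 'a'], usemask=False)
array([(3, 1)],
dtype=[('c', '<i4'), ('a', '<i4')])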
"""
newdtype = [(n, base.dtype[n]) for n in keep_names]
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
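Examples
--------
An illustrative sketch (integer width is platform dependent):
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 2.0)], dtype=[('a', int), ('b', float)])
>>> rfn.rec_drop_fields(a, 'b')
rec.array([(1,)],
dtype=[('a', '<i4')])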
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
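Examples
--------
An illustrative sketch; `usemask=False` returns a plain ndarray, and the
integer width in the dtype repr is platform dependent.
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1,), (2,)], dtype=[('a', int)])
>>> rfn.append_fields(a, 'b', [10., 20.], usemask=False)
array([(1, 10.0), (2, 20.0)],
dtype=[('a', '<i4'), ('b', '<f8')])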
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(
max(len(base), len(data)),
dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
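Examples
--------
An illustrative sketch (integer width is platform dependent):
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1,), (2,)], dtype=[('a', int)])
>>> rfn.rec_append_fields(a, 'b', [10., 20.])
rec.array([(1, 10.0), (2, 20.0)],
dtype=[('a', '<i4'), ('b', '<f8')])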
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to `np.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
If `align=True`, this method produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1,i4,f4', align=True)
>>> dt
dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True)
>>> print_offsets(dt)
offsets: [0, 4, 8]
itemsize: 16
>>> packed_dt = repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 5]
itemsize: 13
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
for fname, fdtype in get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
else:
nameidx = names.index(fname)
_, cdtype = newdescr[nameidx]
if autoconvert:
newdescr[nameidx] = (fname, max(fdtype, cdtype))
elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
(cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
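# Editor's note: a hedged sketch of what the docstring's call above returns
# (illustration only, not part of the original module). The masked entries
# are ignored, so only the unmasked repeats are reported:
#
#   >>> dups, idx = rfn.find_duplicates(a, ignoremask=True, return_index=True)
#   >>> dups['a'].tolist(), idx.tolist()   # values 1 and 2 repeat unmasked
#   ([1, 1, 2, 2], [0, 1, 3, 4])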
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
if len(set(key)) != len(key):
dup = next(x for n,x in enumerate(key) if x in key[n+1:])
raise ValueError("duplicate join key %r" % dup)
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %r' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %r' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
collisions = (set(r1names) & set(r2names)) - set(key)
if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
# (use order of keys in `r1` for back-compatibility)
key1 = [ n for n in r1names if n in key ]
r1k = _keep_fields(r1, key1)
r2k = _keep_fields(r2, key1)
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = get_fieldspec(r1k.dtype)
# Add the fields from r1
for fname, fdtype in get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
for fname, fdtype in get_fieldspec(r2.dtype):
# Have we seen the current name already?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
try:
nameidx = names.index(fname)
except ValueError:
#... we haven't: just add the description to the current list
ndtype.append((fname, fdtype))
else:
# collision
_, cdtype = ndtype[nameidx]
if fname in key:
# The current field is part of the key: take the largest dtype
ndtype[nameidx] = (fname, max(fdtype, cdtype))
else:
# The current field is not part of the key: add the suffixes,
# and place the new field adjacent to the old one
ndtype[nameidx:nameidx + 1] = [
(fname + r1postfix, cdtype),
(fname + r2postfix, fdtype)
]
# Rebuild a dtype from the new fields
ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
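# Editor's note: a minimal, hedged usage sketch for join_by (illustration
# only, not part of the original module; the exact dtype repr may vary by
# platform):
#
#   >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('b', float)])
#   >>> b = np.array([(1, 100.), (3, 300.)], dtype=[('key', int), ('c', float)])
#   >>> join_by('key', a, b, jointype='inner', usemask=False)
#   array([(1, 10., 100.)],
#         dtype=[('key', '<i8'), ('b', '<f8'), ('c', '<f8')])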
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/tickformatstop/_value.py | 1 | import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="scattergeo.marker.colorbar.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
MetSystem/PTVS | refs/heads/master | Python/Tests/TestData/Grammar/MixedWhitespace4.py | 18 | if True:
print('hello')
if True:
print 'goodbye' |
xiaxia47/Python-learning | refs/heads/master | spiders/downloadpic.py | 1 | import os
from urllib.request import urlretrieve
from urllib.request import urlopen
from bs4 import BeautifulSoup
def getAbsoluteUrl(baseUrl, source):
if source.startswith("http://www."):
url= "http://" + source[11:]
elif source.startswith("http://"):
url = source
elif source.startswith("www."):
url = "http://" + source[4:]
else:
url = baseUrl + "/" + source
if baseUrl not in url:
return None
return url
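# A hedged sketch of the URL normalization above (editor's illustration):
#
#   >>> getAbsoluteUrl("http://pythonscraping.com", "img/logo.jpg")
#   'http://pythonscraping.com/img/logo.jpg'
#   >>> getAbsoluteUrl("http://pythonscraping.com", "http://example.com/x.png")
#   # returns None, because the source lies outside the base URL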
def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):
path = absoluteUrl.replace("www.", "")
path = path.replace(baseUrl, "")
path = downloadDirectory + path
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path
downloadDirectory = 'D:/Python Learning/download/'
baseUrl = "http://pythonscraping.com"
html = urlopen("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html,"html.parser")
#imageLocation = bsObj.find("a", {"id": "logo"}).find("img")["src"]
#urlretrieve(imageLocation, "logo.jpg")
downloadList = bsObj.findAll(src=True)
for download in downloadList:
fileUrl = getAbsoluteUrl(baseUrl, download["src"])
if fileUrl is not None:
print(fileUrl)
#urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))
|
LucHermitte/ITK | refs/heads/optimize-vectorimage-unsorted | Wrapping/Generators/Python/Tests/LaplacianImageFilter.py | 19 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the LaplacianImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
dim = 2
IType = itk.Image[itk.F, dim]
OIType = itk.Image[itk.UC, dim]
reader = itk.ImageFileReader[IType].New(FileName=argv[1])
filter = itk.LaplacianImageFilter[IType, IType].New(reader)
cast = itk.RescaleIntensityImageFilter[IType, OIType].New(filter,
OutputMinimum=0,
OutputMaximum=255)
writer = itk.ImageFileWriter[OIType].New(cast, FileName=argv[2])
writer.Update()
|
kenshay/ImageScript | refs/heads/master | Script_Runner/PYTHON/Lib/site-packages/pip/_internal/__init__.py | 8 | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
# We want to inject the use of SecureTransport as early as possible so that any
# references or sessions or what have you are ensured to have it, however we
# only want to do this in the case that we're running on macOS and the linked
# OpenSSL is too old to handle TLSv1.2
try:
import ssl
except ImportError:
pass
else:
# Checks for OpenSSL 1.0.1 on MacOS
if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f:
try:
from pip._vendor.urllib3.contrib import securetransport
except (ImportError, OSError):
pass
else:
securetransport.inject_into_urllib3()
from pip import __version__
from pip._internal import cmdoptions
from pip._internal.exceptions import CommandError, PipError
from pip._internal.utils.misc import get_installed_distributions, get_prog
from pip._internal.utils import deprecation
from pip._internal.vcs import git, mercurial, subversion, bazaar # noqa
from pip._internal.baseparser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.commands import get_summaries, get_similar_commands
from pip._internal.commands import commands_dict
from pip._vendor.urllib3.exceptions import InsecureRequestWarning
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for show and uninstall
should_list_installed = (
subcommand_name in ['show', 'uninstall'] and
not current.startswith('-')
)
if should_list_installed:
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
for opt in subcommand.parser.option_list_all:
if opt.help != optparse.SUPPRESS_HELP:
for opt_str in opt._long_opts + opt._short_opts:
options.append((opt_str, opt.nargs))
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1] and option[0][:2] == "--":
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
for opt in opts:
if opt.help != optparse.SUPPRESS_HELP:
subcommands += opt._long_opts + opt._short_opts
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
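# Editor's note: a hedged sketch of the shell hook that drives autocomplete()
# (illustration only; the variable values below are hypothetical). With these
# environment variables set, autocomplete() prints matching subcommands and
# exits before normal argument parsing runs:
#
#   PIP_AUTO_COMPLETE=1 COMP_WORDS="pip ins" COMP_CWORD=1 pip
#   # -> prints "install" (the only subcommand starting with "ins")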
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3],
)
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip._internal.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
# setlocale can apparently crash if locale are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
|
StackStorm/st2 | refs/heads/master | st2common/st2common/runners/utils.py | 3 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import logging as stdlib_logging
import six
from oslo_config import cfg
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common import log as logging
__all__ = [
"PackConfigDict",
"get_logger_for_python_runner_action",
"get_action_class_instance",
"make_read_and_store_stream_func",
"invoke_post_run",
]
LOG = logging.getLogger(__name__)
# Error which is thrown when Python action tries to access self.config key which doesn't exist
CONFIG_MISSING_ITEM_ERROR = """
Config for pack "%s" is missing key "%s".
Make sure that the config file exists on disk (%s) and contains that key.
Also make sure you run "st2ctl reload --register-configs" when you add a
config and after every change you make to the config.
"""
# Maps logger name to the actual logger instance
# We re-use loggers for the same actions to make sure only a single instance exists for a
# particular action. This way we avoid duplicate log messages, etc.
LOGGERS = {}
class PackConfigDict(dict):
"""
Dictionary class wrapper for pack config dictionaries.
This class throws a user-friendly exception in case user tries to access config item which
doesn't exist in the dict.
"""
def __init__(self, pack_name, *args):
super(PackConfigDict, self).__init__(*args)
self._pack_name = pack_name
def __getitem__(self, key):
try:
value = super(PackConfigDict, self).__getitem__(key)
except KeyError:
# Note: We use late import to avoid performance overhead
from oslo_config import cfg
configs_path = os.path.join(cfg.CONF.system.base_path, "configs/")
config_path = os.path.join(configs_path, self._pack_name + ".yaml")
msg = CONFIG_MISSING_ITEM_ERROR % (self._pack_name, key, config_path)
raise ValueError(msg)
return value
def __setitem__(self, key, value):
super(PackConfigDict, self).__setitem__(key, value)
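# Editor's note: a hedged usage sketch (illustration only, not part of the
# original module; assumes StackStorm's oslo config has been loaded).
# Accessing a missing key raises the user-friendly ValueError built above
# instead of a bare KeyError:
#
#   >>> config = PackConfigDict('my_pack', {'api_key': 'abc'})
#   >>> config['api_key']
#   'abc'
#   >>> config['missing']      # raises ValueError pointing at my_pack.yaml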
def get_logger_for_python_runner_action(action_name, log_level="debug"):
"""
Set up a logger which logs all messages at the given log level (DEBUG by default) and above to stderr.
"""
logger_name = "actions.python.%s" % (action_name)
if logger_name not in LOGGERS:
level_name = log_level.upper()
log_level_constant = getattr(stdlib_logging, level_name, stdlib_logging.DEBUG)
logger = logging.getLogger(logger_name)
console = stdlib_logging.StreamHandler()
console.setLevel(log_level_constant)
formatter = stdlib_logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
console.setFormatter(formatter)
logger.addHandler(console)
logger.setLevel(log_level_constant)
LOGGERS[logger_name] = logger
else:
logger = LOGGERS[logger_name]
return logger
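# Editor's note: a hedged usage sketch (illustration only). Loggers are
# cached per action name, so repeated calls return the same instance:
#
#   >>> log = get_logger_for_python_runner_action('my_action', log_level='info')
#   >>> log is get_logger_for_python_runner_action('my_action')
#   True
#   >>> log.info('visible')    # at or above the configured level -> stderr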
def get_action_class_instance(action_cls, config=None, action_service=None):
"""
Instantiate and return Action class instance.
:param action_cls: Action class to instantiate.
:type action_cls: ``class``
:param config: Config to pass to the action class.
:type config: ``dict``
:param action_service: ActionService instance to pass to the class.
:type action_service: :class:`ActionService`
"""
kwargs = {}
kwargs["config"] = config
kwargs["action_service"] = action_service
# Note: This is done for backward compatibility reasons. We first try to pass
# "action_service" argument to the action class constructor, but if that doesn't work (e.g. old
# action which hasn't been updated yet), we resort to late assignment post class instantiation.
# TODO: Remove in next major version once all the affected actions have been updated.
try:
action_instance = action_cls(**kwargs)
except TypeError as e:
if "unexpected keyword argument 'action_service'" not in six.text_type(e):
raise e
LOG.debug(
'Action class (%s) constructor doesn\'t take "action_service" argument, '
"falling back to late assignment..." % (action_cls.__class__.__name__)
)
action_service = kwargs.pop("action_service", None)
action_instance = action_cls(**kwargs)
action_instance.action_service = action_service
return action_instance
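# Editor's note: a hedged usage sketch (illustration only; ``MyAction`` is a
# hypothetical action class, not part of StackStorm):
#
#   >>> class MyAction(object):
#   ...     def __init__(self, config=None, action_service=None):
#   ...         self.config = config
#   ...         self.action_service = action_service
#   >>> instance = get_action_class_instance(MyAction, config={'key': 'value'})
#   >>> instance.config
#   {'key': 'value'}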
def make_read_and_store_stream_func(execution_db, action_db, store_data_func):
"""
Factory function which returns a function for reading from a stream (stdout / stderr).
This function writes read data into a buffer and stores it in a database.
"""
# NOTE: This import has intentionally been moved here to avoid massive performance overhead
# (1+ second) for other functions inside this module which don't need to use those imports.
from st2common.util import concurrency
greenlet_exit_exc_cls = concurrency.get_greenlet_exit_exception_class()
def read_and_store_stream(stream, buff):
try:
while not stream.closed:
line = stream.readline()
if not line:
break
if isinstance(line, six.binary_type):
line = line.decode("utf-8")
buff.write(line)
# Filter out result delimiter lines
if ACTION_OUTPUT_RESULT_DELIMITER in line:
continue
if cfg.CONF.actionrunner.stream_output:
store_data_func(
execution_db=execution_db, action_db=action_db, data=line
)
except RuntimeError:
# process was terminated abruptly
pass
except greenlet_exit_exc_cls:
# Green thread exited / was killed
pass
return read_and_store_stream
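# Editor's note: a hedged wiring sketch for the factory above (illustration
# only; ``execution_db``, ``action_db`` and ``store_line`` are hypothetical
# stand-ins for the real StackStorm objects):
#
#   >>> def store_line(execution_db, action_db, data):
#   ...     print('stored:', data.rstrip())
#   >>> reader = make_read_and_store_stream_func(execution_db, action_db,
#   ...                                          store_data_func=store_line)
#   >>> # reader(process.stdout, buff) is then run in a green thread, writing
#   >>> # each line into ``buff`` and, if streaming is enabled, to the store.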
def invoke_post_run(liveaction_db, action_db=None):
# NOTE: This import has intentionally been moved here to avoid massive performance overhead
# (1+ second) for other functions inside this module which don't need to use those imports.
from st2common.runners import base as runners
from st2common.util import action_db as action_db_utils
from st2common.content import utils as content_utils
LOG.info("Invoking post run for action execution %s.", liveaction_db.id)
# Identify action and runner.
if not action_db:
action_db = action_db_utils.get_action_by_ref(liveaction_db.action)
if not action_db:
LOG.error(
"Unable to invoke post run. Action %s no longer exists.",
liveaction_db.action,
)
return
LOG.info(
"Action execution %s runs %s of runner type %s.",
liveaction_db.id,
action_db.name,
action_db.runner_type["name"],
)
# Get instance of the action runner and related configuration.
runner_type_db = action_db_utils.get_runnertype_by_name(
action_db.runner_type["name"]
)
runner = runners.get_runner(name=runner_type_db.name)
entry_point = content_utils.get_entry_point_abs_path(
pack=action_db.pack, entry_point=action_db.entry_point
)
libs_dir_path = content_utils.get_action_libs_abs_path(
pack=action_db.pack, entry_point=action_db.entry_point
)
# Configure the action runner.
runner.runner_type_db = runner_type_db
runner.action = action_db
runner.action_name = action_db.name
runner.liveaction = liveaction_db
runner.liveaction_id = str(liveaction_db.id)
runner.entry_point = entry_point
runner.context = getattr(liveaction_db, "context", dict())
runner.callback = getattr(liveaction_db, "callback", dict())
runner.libs_dir_path = libs_dir_path
# Invoke the post_run method.
runner.post_run(liveaction_db.status, liveaction_db.result)
|
felipebetancur/scipy | refs/heads/master | benchmarks/benchmarks/sparse_linalg_expm.py | 52 | """benchmarks for the scipy.sparse.linalg._expm_multiply module"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
try:
import scipy.linalg
from scipy.sparse.linalg import expm_multiply
except ImportError:
pass
from .common import Benchmark
def random_sparse_csr(m, n, nnz_per_row):
# Copied from the scipy.sparse benchmark.
rows = np.arange(m).repeat(nnz_per_row)
cols = np.random.random_integers(low=0, high=n-1, size=nnz_per_row*m)
vals = np.random.random_sample(m*nnz_per_row)
M = scipy.sparse.coo_matrix((vals,(rows,cols)), (m,n), dtype=float)
return M.tocsr()
def random_sparse_csc(m, n, nnz_per_row):
# Copied from the scipy.sparse benchmark.
rows = np.arange(m).repeat(nnz_per_row)
cols = np.random.random_integers(low=0, high=n-1, size=nnz_per_row*m)
vals = np.random.random_sample(m*nnz_per_row)
M = scipy.sparse.coo_matrix((vals,(rows,cols)), (m,n), dtype=float)
# Use csc instead of csr, because sparse LU decomposition
# raises a warning when I use csr.
return M.tocsc()
class ExpmMultiply(Benchmark):
params = [['sparse', 'full']]
param_names = ['run format']
def setup(self, *args):
self.n = 2000
self.i = 100
self.j = 200
nnz_per_row = 25
self.A = random_sparse_csr(self.n, self.n, nnz_per_row)
self.A_dense = self.A.toarray()
def time_expm_multiply(self, format):
if format == 'full':
# computing full expm of the dense array...
A_expm = scipy.linalg.expm(self.A_dense)
A_expm[self.i, self.j]
else:
# computing only column j of expm of the sparse matrix...
v = np.zeros(self.n, dtype=float)
v[self.j] = 1
A_expm_col_j = expm_multiply(self.A, v)
A_expm_col_j[self.i]
class Expm(Benchmark):
params = [
[30, 100, 300],
['sparse', 'dense']
]
param_names = ['n', 'format']
def setup(self, n, format):
np.random.seed(1234)
# Let the number of nonzero entries per row
# scale like the log of the order of the matrix.
nnz_per_row = int(math.ceil(math.log(n)))
# time the sampling of a random sparse matrix
self.A_sparse = random_sparse_csc(n, n, nnz_per_row)
# first format conversion
self.A_dense = self.A_sparse.toarray()
def time_expm(self, n, format):
if format == 'sparse':
scipy.linalg.expm(self.A_sparse)
elif format == 'dense':
scipy.linalg.expm(self.A_dense)
|
DR08/mxnet | refs/heads/stable | example/neural-style/nstyle.py | 52 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import find_mxnet
import mxnet as mx
import numpy as np
import importlib
import logging
logging.basicConfig(level=logging.DEBUG)
import argparse
from collections import namedtuple
from skimage import io, transform
from skimage.restoration import denoise_tv_chambolle
CallbackData = namedtuple('CallbackData', field_names=['eps','epoch','img','filename'])
def get_args(arglist=None):
parser = argparse.ArgumentParser(description='neural style')
parser.add_argument('--model', type=str, default='vgg19',
choices = ['vgg'],
help = 'the pretrained model to use')
parser.add_argument('--content-image', type=str, default='input/IMG_4343.jpg',
help='the content image')
parser.add_argument('--style-image', type=str, default='input/starry_night.jpg',
help='the style image')
parser.add_argument('--stop-eps', type=float, default=.005,
help='stop if the relative change is less than eps')
parser.add_argument('--content-weight', type=float, default=10,
help='the weight for the content image')
parser.add_argument('--style-weight', type=float, default=1,
help='the weight for the style image')
parser.add_argument('--tv-weight', type=float, default=1e-2,
help='the magnitude of the TV loss')
parser.add_argument('--max-num-epochs', type=int, default=1000,
help='the maximal number of training epochs')
parser.add_argument('--max-long-edge', type=int, default=600,
help='resize the content image')
parser.add_argument('--lr', type=float, default=.001,
help='the initial learning rate')
parser.add_argument('--gpu', type=int, default=0,
help='which gpu card to use, -1 means using cpu')
parser.add_argument('--output_dir', type=str, default='output/',
help='the output image')
parser.add_argument('--save-epochs', type=int, default=50,
help='save the output every n epochs')
parser.add_argument('--remove-noise', type=float, default=.02,
help='the magnitude of noise to remove')
parser.add_argument('--lr-sched-delay', type=int, default=75,
help='how many epochs between decreasing learning rate')
parser.add_argument('--lr-sched-factor', type=int, default=0.9,
help='factor to decrease learning rate on schedule')
if arglist is None:
return parser.parse_args()
else:
return parser.parse_args(arglist)
def PreprocessContentImage(path, long_edge):
img = io.imread(path)
logging.info("load the content image, size = %s", img.shape[:2])
factor = float(long_edge) / max(img.shape[:2])
new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
resized_img = transform.resize(img, new_size)
sample = np.asarray(resized_img) * 256
# swap axes to make image from (224, 224, 3) to (3, 224, 224)
sample = np.swapaxes(sample, 0, 2)
sample = np.swapaxes(sample, 1, 2)
# sub mean
sample[0, :] -= 123.68
sample[1, :] -= 116.779
sample[2, :] -= 103.939
logging.info("resize the content image to %s", new_size)
return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
def PreprocessStyleImage(path, shape):
img = io.imread(path)
resized_img = transform.resize(img, (shape[2], shape[3]))
sample = np.asarray(resized_img) * 256
sample = np.swapaxes(sample, 0, 2)
sample = np.swapaxes(sample, 1, 2)
sample[0, :] -= 123.68
sample[1, :] -= 116.779
sample[2, :] -= 103.939
return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
def PostprocessImage(img):
img = np.resize(img, (3, img.shape[2], img.shape[3]))
img[0, :] += 123.68
img[1, :] += 116.779
img[2, :] += 103.939
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 0, 2)
img = np.clip(img, 0, 255)
return img.astype('uint8')
def SaveImage(img, filename, remove_noise=0.):
logging.info('save output to %s', filename)
out = PostprocessImage(img)
if remove_noise != 0.0:
out = denoise_tv_chambolle(out, weight=remove_noise, multichannel=True)
io.imsave(filename, out)
def style_gram_symbol(input_size, style):
_, output_shapes, _ = style.infer_shape(data=(1, 3, input_size[0], input_size[1]))
gram_list = []
grad_scale = []
for i in range(len(style.list_outputs())):
shape = output_shapes[i]
x = mx.sym.Reshape(style[i], target_shape=(int(shape[1]), int(np.prod(shape[2:]))))
# use fully connected to quickly do dot(x, x^T)
gram = mx.sym.FullyConnected(x, x, no_bias=True, num_hidden=shape[1])
gram_list.append(gram)
grad_scale.append(np.prod(shape[1:]) * shape[1])
return mx.sym.Group(gram_list), grad_scale
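# Editor's note (illustration only): the FullyConnected trick above computes,
# for each style layer reshaped to (channels, height*width), the Gram matrix
# dot(x, x^T). A hedged numpy equivalent for a single hypothetical layer:
#
#   x = np.random.rand(64, 32 * 32)   # 64 channels, 32x32 spatial positions
#   gram = x.dot(x.T)                 # (64, 64), what the symbol produces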
def get_loss(gram, content):
gram_loss = []
for i in range(len(gram.list_outputs())):
gvar = mx.sym.Variable("target_gram_%d" % i)
gram_loss.append(mx.sym.sum(mx.sym.square(gvar - gram[i])))
cvar = mx.sym.Variable("target_content")
content_loss = mx.sym.sum(mx.sym.square(cvar - content))
return mx.sym.Group(gram_loss), content_loss
def get_tv_grad_executor(img, ctx, tv_weight):
"""create TV gradient executor with input binded on img
"""
if tv_weight <= 0.0:
return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
mx.sym.Convolution(data=channels[i], weight=skernel,
num_filter=1,
kernel=(3, 3), pad=(1,1),
no_bias=True, stride=(1,1))
for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
.reshape((1, 1, 3, 3)),
ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img,
"kernel": kernel})
def train_nstyle(args, callback=None):
"""Train a neural style network.
Args are from argparse and control input, output, hyper-parameters.
callback allows for display of training progress.
"""
# input
dev = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
content_np = PreprocessContentImage(args.content_image, args.max_long_edge)
style_np = PreprocessStyleImage(args.style_image, shape=content_np.shape)
size = content_np.shape[2:]
# model
Executor = namedtuple('Executor', ['executor', 'data', 'data_grad'])
model_module = importlib.import_module('model_' + args.model)
style, content = model_module.get_symbol()
gram, gscale = style_gram_symbol(size, style)
model_executor = model_module.get_executor(gram, content, size, dev)
model_executor.data[:] = style_np
model_executor.executor.forward()
style_array = []
for i in range(len(model_executor.style)):
style_array.append(model_executor.style[i].copyto(mx.cpu()))
model_executor.data[:] = content_np
model_executor.executor.forward()
content_array = model_executor.content.copyto(mx.cpu())
# delete the executor
del model_executor
style_loss, content_loss = get_loss(gram, content)
model_executor = model_module.get_executor(
style_loss, content_loss, size, dev)
grad_array = []
for i in range(len(style_array)):
style_array[i].copyto(model_executor.arg_dict["target_gram_%d" % i])
grad_array.append(mx.nd.ones((1,), dev) * (float(args.style_weight) / gscale[i]))
grad_array.append(mx.nd.ones((1,), dev) * (float(args.content_weight)))
print([x.asscalar() for x in grad_array])
content_array.copyto(model_executor.arg_dict["target_content"])
# train
# initialize img with random noise
img = mx.nd.zeros(content_np.shape, ctx=dev)
img[:] = mx.rnd.uniform(-0.1, 0.1, img.shape)
lr = mx.lr_scheduler.FactorScheduler(step=args.lr_sched_delay,
factor=args.lr_sched_factor)
optimizer = mx.optimizer.NAG(
learning_rate = args.lr,
wd = 0.0001,
momentum=0.95,
lr_scheduler = lr)
optim_state = optimizer.create_state(0, img)
logging.info('start training arguments %s', args)
old_img = img.copyto(dev)
clip_norm = 1 * np.prod(img.shape)
tv_grad_executor = get_tv_grad_executor(img, dev, args.tv_weight)
for e in range(args.max_num_epochs):
img.copyto(model_executor.data)
model_executor.executor.forward()
model_executor.executor.backward(grad_array)
gnorm = mx.nd.norm(model_executor.data_grad).asscalar()
if gnorm > clip_norm:
model_executor.data_grad[:] *= clip_norm / gnorm
if tv_grad_executor is not None:
tv_grad_executor.forward()
optimizer.update(0, img,
model_executor.data_grad + tv_grad_executor.outputs[0],
optim_state)
else:
optimizer.update(0, img, model_executor.data_grad, optim_state)
new_img = img
eps = (mx.nd.norm(old_img - new_img) / mx.nd.norm(new_img)).asscalar()
old_img = new_img.copyto(dev)
logging.info('epoch %d, relative change %f', e, eps)
if eps < args.stop_eps:
logging.info('eps < args.stop_eps, training finished')
break
if callback:
cbdata = {
'eps': eps,
'epoch': e+1,
}
if (e+1) % args.save_epochs == 0:
outfn = args.output_dir + 'e_'+str(e+1)+'.jpg'
npimg = new_img.asnumpy()
SaveImage(npimg, outfn, args.remove_noise)
if callback:
cbdata['filename'] = outfn
cbdata['img'] = npimg
if callback:
callback(cbdata)
final_fn = args.output_dir + '/final.jpg'
SaveImage(new_img.asnumpy(), final_fn)
if __name__ == "__main__":
args = get_args()
train_nstyle(args)
|
levkar/odoo | refs/heads/10.0 | addons/l10n_br/migrations/9.0.1.0/post-migrate_tags_on_taxes.py | 832 | # -*- coding: utf-8 -*-
import odoo
def migrate(cr, version):
registry = odoo.registry(cr.dbname)
from odoo.addons.account.models.chart_template import migrate_tags_on_taxes
migrate_tags_on_taxes(cr, registry)
|
vmayoral/basic_reinforcement_learning | refs/heads/master | tutorial14/code/train_ppo1.py | 1 | import gym
from baselines.ppo1 import mlp_policy, pposgd_simple
import tensorflow as tf
import argparse
#parser
parser = argparse.ArgumentParser()
parser.add_argument('--environment', dest='environment', type=str, default='MountainCarContinuous-v0')
parser.add_argument('--num_timesteps', dest='num_timesteps', type=int, default=10000)
args = parser.parse_args()
env = gym.envs.make(str(args.environment))
g = tf.Graph()
with g.as_default():
# tf.reset_default_graph()
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2)
pposgd_simple.learn(env,
policy_fn,
max_timesteps=args.num_timesteps,
timesteps_per_actorbatch=2048, # timesteps per actor per update
# timesteps_per_actorbatch=128, # timesteps per actor per update
clip_param=0.2,
entcoeff=0.0,
optim_epochs=10,
optim_stepsize=3e-4,
optim_batchsize=64,
gamma=0.99,
lam=0.95,
schedule='linear',
save_model_with_prefix=str(env.__class__.__name__), # typically, the env.
outdir="/tmp/experiments/"+str(args.environment)+"/PPO1/") # path for the log files (tensorboard) and models
# act.save("models/mountaincar_continuous_model_PPO_"+str(m)+".pkl")
|
TruLtd/tru-reputation-token | refs/heads/master | docs/conf.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tru Reputation Token documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 24 19:00:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tru Reputation Token'
copyright = '2017, Tru Ltd'
author = 'Tru Ltd'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.12'
# The full version, including alpha/beta/rc tags.
release = '0.1.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TruReputationTokendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TruReputationToken.tex', 'Tru Reputation Token Documentation',
'Tru Ltd', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trureputationtoken', 'Tru Reputation Token Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TruReputationToken', 'Tru Reputation Token Documentation',
author, 'TruReputationToken', 'One line description of project.',
'Miscellaneous'),
]
|
AgenttiX/fys-1010 | refs/heads/master | specific_charge_of_electron/tools.py | 1 | # insert copyleft licence here
import numpy as np
from math import log10, floor
import math
from bokeh.models import Label
class iterating_colors:
"""
This class mimics matlab-like color cycling behavior. Calling the get_next() method
returns the next color in the list, without hassle. By default the color set is the same as in matlab.
"""
def __init__(self, palette="matlab"):
if palette == "matlab":
self.colors = [ # matlab_line_plot_colors
[0.0000, 0.4470, 0.7410],
[0.8500, 0.3250, 0.0980],
[0.9290, 0.6940, 0.1250],
[0.4940, 0.1840, 0.5560],
[0.4660, 0.6740, 0.1880],
[0.3010, 0.7450, 0.9330],
[0.6350, 0.0780, 0.1840]]
if palette == "octave":
self.colors = [ # octave and old matlab (version <= R2014a) line plotting colors
[0.00000, 0.00000, 1.00000],
[0.00000, 0.50000, 0.00000],
[1.00000, 0.00000, 0.00000],
[0.00000, 0.75000, 0.75000],
[0.75000, 0.00000, 0.75000],
[0.75000, 0.75000, 0.00000],
[0.25000, 0.25000, 0.25000]]
if palette == "long":
# from
# http://blogs.mathworks.com/pick/2008/08/15/colors-for-your-multi-line-plots/
self.colors = [
[0.00, 0.00, 1.00],
[0.00, 0.50, 0.00],
[1.00, 0.00, 0.00],
[0.00, 0.75, 0.75],
[0.75, 0.00, 0.75],
[0.75, 0.75, 0.00],
[0.25, 0.25, 0.25],
[0.75, 0.25, 0.25],
[0.95, 0.95, 0.00],
[0.25, 0.25, 0.75],
[0.75, 0.75, 0.75],
[0.00, 1.00, 0.00],
[0.76, 0.57, 0.17],
[0.54, 0.63, 0.22],
[0.34, 0.57, 0.92],
[1.00, 0.10, 0.60],
[0.88, 0.75, 0.73],
[0.10, 0.49, 0.47],
[0.66, 0.34, 0.65],
[0.99, 0.41, 0.23]]
self.current_index = 0
def get_next(self):
"""
Returns the next color in the colormap; wraps around to the first color after the last one is used.
:return: tuple of length 3, type: int, varying from 0 to 255.
"""
m = len(self.colors) # the number of rows
color = self.colors[self.current_index % m]
RGB_color = [round(x*255.45) for x in color]
self.current_index += 1
return tuple(RGB_color)
def reset(self):
self.current_index = 0
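# A hedged usage sketch (editor's illustration):
#
#   >>> colors = iterating_colors()
#   >>> colors.get_next()      # first matlab line color, scaled to 0..255
#   (0, 114, 189)
#   >>> colors.reset()         # start the cycle over from the first color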
def linear_regression_origo(x_axis, data_points):
"""
This function calculates a linear regression (slope and error) for a line that goes through the origin.
The approach is rather manual, but precise and clear.
:param x_axis: numpy array of x values
:param data_points: numpy array of y values
:return:
micro slope of the fitted line
err mean error in that slope
"""
# these libraries are shit
# return np.linalg.lstsq(x_axis,data_points)[0]
# Numpy arrays can't be column vectors! Not lying! They are that feeble and ambiguous by default.
x_mat = np.transpose(np.matrix(x_axis))
y_mat = np.transpose(np.matrix(data_points))
# If it is wanted that origo is not fixed, then replace x_mat by X_mat in micro calculations
X_mat = np.concatenate((x_mat, np.ones((x_axis.shape[0], 1))), axis=1)
# No really, these libraries _are_ pure shit! They break down if some matrix-dimension is one!
# return np.linalg.lstsq(X_mat, y_mat)[0]
# Then let's do it the hard way.
# https://en.wikipedia.org/wiki/Linear_regression
# https://en.wikipedia.org/wiki/Least_squares
micro = (np.linalg.inv(x_mat.transpose() * x_mat) * np.transpose(x_mat) * y_mat)[0, 0]
# https://en.wikipedia.org/wiki/Mean_squared_error
# https://en.wikipedia.org/wiki/Standard_deviation
# https://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption
x_mean = np.sum(x_mat)/x_mat.size
dof = 1 # degrees of freedom
MSE = (1/(y_mat.size-dof)) * \
np.sum(np.multiply((y_mat-micro*x_mat), (y_mat-micro*x_mat))) / \
np.sum(np.multiply((x_mat-x_mean), (x_mat-x_mean)))
err = np.sqrt(MSE)
return micro, err
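# A hedged usage sketch (editor's illustration): fitting noiseless y = 2x
# should recover a slope of 2 with (numerically) zero error.
#
#   >>> x = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> micro, err = linear_regression_origo(x, 2.0 * x)
#   >>> round(micro, 6), round(err, 6)
#   (2.0, 0.0)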
def print_to_latex_tabular(matrix, column_precisions=None, significant_figures=False):
"""
Prints 2d-numpy arrays (or regular lists) to latex tabular format. Then just copy-paste it.
:param matrix: matrix to print
(list<float> or numpy_array<float>)
:param column_precisions: single value OR array of precisions for each column
(int, list<int> or numpy_array<int>, len=columns)
:param significant_figures: if false then column_precisions corresponds to the normal decimal precision
if true then column_precisions corresponds to the number of significant figures to be printed
(bool)
:return:
Examples:
column_precisions=[...,4,...], significant_figures=True:
0.0012345678 -> 0.001234
column_precisions=[...,4,...], significant_figures=False:
0.0012345678 -> 0.0012
"""
# I f***ing hate numpy's s***ty and poor arrays. In this case the second dimension is totally
# undefined when it is an 1D-array. So some extra unnecessary code is required for
# ridiculously simple things. This is NOT what python ought to be.
array = np.matrix(matrix) # here I have contradictory naming just for joy of python
# python syntax for checking if np.shape empty tuple, i.e. col_pres is int. Clear? Not.
if (column_precisions is not None) and not np.shape(column_precisions):
col_pres = np.ones(np.shape(array)[1], dtype=np.int) * column_precisions
else:
col_pres = column_precisions
if (col_pres is not None) and (np.shape(array)[1] != len(col_pres)):
print(array)
print(" np.shape(array)[1]", np.shape(array)[1], " len(col_pres)", len(col_pres))
raise Exception("col_pres should be vector of length of columns")
array_to_print = [["" for n in range(np.shape(array)[1])] for m in range(np.shape(array)[0])]
# convert array to printable form
for m in range(np.shape(array)[0]):
for n in range(np.shape(array)[1]):
if col_pres is None:
array_to_print[m][n] = str(array[m, n])
elif significant_figures:
# logarithm and value of exact zero is not a good combination
if not math.isclose(array[m, n], 0):
pres = -int(floor(log10(abs(array[m, n]))))+col_pres[n]-1
else:
pres = col_pres[n]
if (pres>0):
array_to_print[m][n] = ("{:." + str(pres) + "f}").format(round(array[m, n], pres))
else:
array_to_print[m][n] = str(int(round(array[m, n], pres)))
elif (not significant_figures) and (col_pres[n] > 0):
pres = col_pres[n]
array_to_print[m][n] = ("{:."+str(pres)+"f}").format(round(array[m, n], pres))
# print no decimals at all (integers), (negative col_pres values are permitted)
else:
array_to_print[m][n] = str(int(round(array[m, n], col_pres[n])))
# find the column lengths (cells with most characters)
max_column_len = np.amax(np.vectorize(lambda cell: len(cell))(array_to_print), axis=0)
print("")
print("\\begin{tabular}{" + np.shape(array)[1]*"|l" + "|}\n", end="", sep="")
print("\\hline")
print((" & " * (np.shape(array)[1]-1) + " \\\\"))
print("\\hline")
for m in range(np.shape(array)[0]):
for n in range(np.shape(array)[1]):
# print and trailing spaces to max width so tabulars are nicely readable
print(("{:"+str(max_column_len[n])+"}").format(array_to_print[m][n]), end="", sep="")
if n != np.shape(array)[1]-1:
print(" & ", end="", sep="")
else:
print(" \\\\\n", end="", sep="")
print("\\hline")
print(r"\end{tabular}")
print("")
|
gmacchi93/serverInfoParaguay | refs/heads/master | apps/venv/lib/python2.7/site-packages/rest_framework/pagination.py | 8 | # coding: utf-8
"""
Pagination serializers determine the structure of the output that should
be used for paginated responses.
"""
from __future__ import unicode_literals
from base64 import b64encode, b64decode
from collections import namedtuple
from django.core.paginator import InvalidPage, Paginator as DjangoPaginator
from django.template import Context, loader
from django.utils import six
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import OrderedDict
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import (
replace_query_param, remove_query_param
)
import warnings
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a positive integer (zero is allowed unless `strict` is True).
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def _divide_with_ceil(a, b):
"""
Returns 'a' divided by 'b', with any remainder rounded up.
"""
if a % b:
return (a // b) + 1
return a // b
def _get_count(queryset):
"""
Determine an object count, supporting either querysets or regular lists.
"""
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
def _get_displayed_page_numbers(current, final):
"""
This utility function determines a list of page numbers to display.
This gives us a nice contextually relevant set of page numbers.
For example:
current=14, final=16 -> [1, None, 13, 14, 15, 16]
This implementation gives one page to each side of the cursor,
or two pages to the side when the cursor is at the edge, then
ensures that any breaks between non-continuous page numbers never
remove only a single page.
For an alternative implementation which gives two pages to each side of
the cursor, e.g. as in GitHub issue list pagination, see:
https://gist.github.com/tomchristie/321140cebb1c4a558b15
"""
assert current >= 1
assert final >= current
if final <= 5:
return list(range(1, final + 1))
# We always include the first two pages, last two pages, and
# two pages either side of the current page.
included = set((
1,
current - 1, current, current + 1,
final
))
# If the break would only exclude a single page number then we
# may as well include the page number instead of the break.
if current <= 4:
included.add(2)
included.add(3)
if current >= final - 3:
included.add(final - 1)
included.add(final - 2)
# Now sort the page numbers and drop anything outside the limits.
included = [
idx for idx in sorted(list(included))
if idx > 0 and idx <= final
]
# Finally insert any `...` breaks
if current > 4:
included.insert(1, None)
if current < final - 3:
included.insert(len(included) - 1, None)
return included
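# Editor's note: one more worked example (illustration only). With the cursor
# at the first page, the single break collapses toward the end of the range:
#
#   >>> _get_displayed_page_numbers(1, 10)
#   [1, 2, 3, None, 10]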
def _get_page_links(page_numbers, current, url_func):
"""
Given a list of page numbers and `None` page breaks,
return a list of `PageLink` objects.
"""
page_links = []
for page_number in page_numbers:
if page_number is None:
page_link = PAGE_BREAK
else:
page_link = PageLink(
url=url_func(page_number),
number=page_number,
is_active=(page_number == current),
is_break=False
)
page_links.append(page_link)
return page_links
def _decode_cursor(encoded):
"""
Given a string representing an encoded cursor, return a `Cursor` instance.
"""
# The offset in the cursor is used in situations where we have a
# nearly-unique index. (Eg millisecond precision creation timestamps)
# We guard against malicious users attempting to cause expensive database
# queries, by having a hard cap on the maximum possible size of the offset.
OFFSET_CUTOFF = 1000
try:
querystring = b64decode(encoded.encode('ascii')).decode('ascii')
tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
offset = tokens.get('o', ['0'])[0]
offset = _positive_int(offset, cutoff=OFFSET_CUTOFF)
reverse = tokens.get('r', ['0'])[0]
reverse = bool(int(reverse))
position = tokens.get('p', [None])[0]
except (TypeError, ValueError):
return None
return Cursor(offset=offset, reverse=reverse, position=position)
def _encode_cursor(cursor):
"""
Given a Cursor instance, return an encoded string representation.
"""
tokens = {}
if cursor.offset != 0:
tokens['o'] = str(cursor.offset)
if cursor.reverse:
tokens['r'] = '1'
if cursor.position is not None:
tokens['p'] = cursor.position
querystring = urlparse.urlencode(tokens, doseq=True)
return b64encode(querystring.encode('ascii')).decode('ascii')
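# The two helpers above are inverses of one another; a small round-trip
# sketch using the `Cursor` namedtuple defined just below:
#     cursor = Cursor(offset=0, reverse=False, position='15')
#     _decode_cursor(_encode_cursor(cursor)) == cursor  # -> True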
def _reverse_ordering(ordering_tuple):
"""
Given an order_by tuple such as `('-created', 'uuid')` reverse the
ordering and return a new tuple, eg. `('created', '-uuid')`.
"""
def invert(x):
return x[1:] if (x.startswith('-')) else '-' + x
return tuple([invert(item) for item in ordering_tuple])
Cursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])
PageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])
PAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)
class BasePagination(object):
display_page_controls = False
def paginate_queryset(self, queryset, request, view=None): # pragma: no cover
raise NotImplementedError('paginate_queryset() must be implemented.')
def get_paginated_response(self, data): # pragma: no cover
raise NotImplementedError('get_paginated_response() must be implemented.')
def to_html(self): # pragma: no cover
raise NotImplementedError('to_html() must be implemented to display page controls.')
class PageNumberPagination(BasePagination):
"""
A simple page number based style that supports page numbers as
query parameters. For example:
http://api.example.org/accounts/?page=4
http://api.example.org/accounts/?page=4&page_size=100
"""
# The default page size.
# Defaults to `None`, meaning pagination is disabled.
page_size = api_settings.PAGE_SIZE
# Client can control the page using this query parameter.
page_query_param = 'page'
# Client can control the page size using this query parameter.
# Default is 'None'. Set to e.g. 'page_size' to enable usage.
page_size_query_param = None
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = None
last_page_strings = ('last',)
template = 'rest_framework/pagination/numbers.html'
invalid_page_message = _('Invalid page "{page_number}": {message}.')
def _handle_backwards_compat(self, view):
"""
Prior to version 3.1, pagination was handled in the view, and the
attributes were set there. The attributes should now be set on
the pagination class, but the old style is still pending deprecation.
"""
assert not (
getattr(view, 'pagination_serializer_class', None) or
getattr(api_settings, 'DEFAULT_PAGINATION_SERIALIZER_CLASS', None)
), (
"The pagination_serializer_class attribute and "
"DEFAULT_PAGINATION_SERIALIZER_CLASS setting have been removed as "
"part of the 3.1 pagination API improvement. See the pagination "
"documentation for details on the new API."
)
for (settings_key, attr_name) in (
('PAGINATE_BY', 'page_size'),
('PAGINATE_BY_PARAM', 'page_size_query_param'),
('MAX_PAGINATE_BY', 'max_page_size')
):
value = getattr(api_settings, settings_key, None)
if value is not None:
setattr(self, attr_name, value)
warnings.warn(
"The `%s` settings key is pending deprecation. "
"Use the `%s` attribute on the pagination class instead." % (
settings_key, attr_name
),
PendingDeprecationWarning,
)
for (view_attr, attr_name) in (
('paginate_by', 'page_size'),
('page_query_param', 'page_query_param'),
('paginate_by_param', 'page_size_query_param'),
('max_paginate_by', 'max_page_size')
):
value = getattr(view, view_attr, None)
if value is not None:
setattr(self, attr_name, value)
warnings.warn(
"The `%s` view attribute is pending deprecation. "
"Use the `%s` attribute on the pagination class instead." % (
view_attr, attr_name
),
PendingDeprecationWarning,
)
def paginate_queryset(self, queryset, request, view=None):
"""
Paginate a queryset if required, either returning a
page object, or `None` if pagination is not configured for this view.
"""
self._handle_backwards_compat(view)
page_size = self.get_page_size(request)
if not page_size:
return None
paginator = DjangoPaginator(queryset, page_size)
page_number = request.query_params.get(self.page_query_param, 1)
if page_number in self.last_page_strings:
page_number = paginator.num_pages
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(exc)
)
raise NotFound(msg)
if paginator.num_pages > 1 and self.template is not None:
# The browsable API should display pagination controls.
self.display_page_controls = True
self.request = request
return list(self.page)
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_page_size(self, request):
if self.page_size_query_param:
try:
return _positive_int(
request.query_params[self.page_size_query_param],
strict=True,
cutoff=self.max_page_size
)
except (KeyError, ValueError):
pass
return self.page_size
def get_next_link(self):
if not self.page.has_next():
return None
url = self.request.build_absolute_uri()
page_number = self.page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self):
if not self.page.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = self.page.previous_page_number()
if page_number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(url, self.page_query_param, page_number)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.page_query_param)
else:
return replace_query_param(base_url, self.page_query_param, page_number)
current = self.page.number
final = self.page.paginator.num_pages
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
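# A minimal usage sketch (assumed, not part of this module): projects
# typically subclass a style and point DRF at it via the view's
# `pagination_class` attribute or the DEFAULT_PAGINATION_CLASS setting:
#     class StandardResultsSetPagination(PageNumberPagination):
#         page_size = 100
#         page_size_query_param = 'page_size'
#         max_page_size = 1000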
class LimitOffsetPagination(BasePagination):
"""
A limit/offset based style. For example:
http://api.example.org/accounts/?limit=100
http://api.example.org/accounts/?offset=400&limit=100
"""
default_limit = api_settings.PAGE_SIZE
limit_query_param = 'limit'
offset_query_param = 'offset'
max_limit = None
template = 'rest_framework/pagination/numbers.html'
def paginate_queryset(self, queryset, request, view=None):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.count = _get_count(queryset)
self.request = request
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
return list(queryset[self.offset:self.offset + self.limit])
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_limit(self, request):
if self.limit_query_param:
try:
return _positive_int(
request.query_params[self.limit_query_param],
cutoff=self.max_limit
)
except (KeyError, ValueError):
pass
return self.default_limit
def get_offset(self, request):
try:
return _positive_int(
request.query_params[self.offset_query_param],
)
except (KeyError, ValueError):
return 0
def get_next_link(self):
if self.offset + self.limit >= self.count:
return None
url = self.request.build_absolute_uri()
offset = self.offset + self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_previous_link(self):
if self.offset <= 0:
return None
url = self.request.build_absolute_uri()
if self.offset - self.limit <= 0:
return remove_query_param(url, self.offset_query_param)
offset = self.offset - self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
current = _divide_with_ceil(self.offset, self.limit) + 1
# The number of pages is a little bit fiddly.
# We need to sum both the number of pages from current offset to end
# plus the number of pages up to the current offset.
# When offset is not strictly divisible by the limit then we may
# end up introducing an extra page as an artifact.
final = (
_divide_with_ceil(self.count - self.offset, self.limit) +
_divide_with_ceil(self.offset, self.limit)
)
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.offset_query_param)
else:
offset = self.offset + ((page_number - current) * self.limit)
return replace_query_param(base_url, self.offset_query_param, offset)
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
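# As above, a hedged configuration sketch for the limit/offset style:
#     class BoundedLimitOffsetPagination(LimitOffsetPagination):
#         default_limit = 25
#         max_limit = 100  # caps the ?limit= value a client may request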
class CursorPagination(BasePagination):
"""
The cursor pagination implementation is necessarily complex.
For an overview of the position/offset style we use, see this post:
http://cramer.io/2011/03/08/building-cursors-for-the-disqus-api/
"""
cursor_query_param = 'cursor'
page_size = api_settings.PAGE_SIZE
invalid_cursor_message = _('Invalid cursor')
ordering = '-created'
template = 'rest_framework/pagination/previous_and_next.html'
def paginate_queryset(self, queryset, request, view=None):
if self.page_size is None:
return None
self.base_url = request.build_absolute_uri()
self.ordering = self.get_ordering(request, queryset, view)
# Determine if we have a cursor, and if so then decode it.
encoded = request.query_params.get(self.cursor_query_param)
if encoded is None:
self.cursor = None
(offset, reverse, current_position) = (0, False, None)
else:
self.cursor = _decode_cursor(encoded)
if self.cursor is None:
raise NotFound(self.invalid_cursor_message)
(offset, reverse, current_position) = self.cursor
# Cursor pagination always enforces an ordering.
if reverse:
queryset = queryset.order_by(*_reverse_ordering(self.ordering))
else:
queryset = queryset.order_by(*self.ordering)
# If we have a cursor with a fixed position then filter by that.
if current_position is not None:
order = self.ordering[0]
is_reversed = order.startswith('-')
order_attr = order.lstrip('-')
# Test for: (cursor reversed) XOR (queryset reversed)
if self.cursor.reverse != is_reversed:
kwargs = {order_attr + '__lt': current_position}
else:
kwargs = {order_attr + '__gt': current_position}
queryset = queryset.filter(**kwargs)
# If we have an offset cursor then offset the entire page by that amount.
# We also always fetch an extra item in order to determine if there is a
# page following on from this one.
results = list(queryset[offset:offset + self.page_size + 1])
self.page = list(results[:self.page_size])
# Determine the position of the final item following the page.
if len(results) > len(self.page):
has_following_position = True
following_position = self._get_position_from_instance(results[-1], self.ordering)
else:
has_following_position = False
following_position = None
# If we have a reverse queryset, then the query ordering was in reverse
# so we need to reverse the items again before returning them to the user.
if reverse:
self.page = list(reversed(self.page))
if reverse:
# Determine next and previous positions for reverse cursors.
self.has_next = (current_position is not None) or (offset > 0)
self.has_previous = has_following_position
if self.has_next:
self.next_position = current_position
if self.has_previous:
self.previous_position = following_position
else:
# Determine next and previous positions for forward cursors.
self.has_next = has_following_position
self.has_previous = (current_position is not None) or (offset > 0)
if self.has_next:
self.next_position = following_position
if self.has_previous:
self.previous_position = current_position
# Display page controls in the browsable API if there is more
# than one page.
if (self.has_previous or self.has_next) and self.template is not None:
self.display_page_controls = True
return self.page
def get_next_link(self):
if not self.has_next:
return None
if self.cursor and self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[-1], self.ordering)
else:
compare = self.next_position
offset = 0
for item in reversed(self.page):
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
# The item in this position has the same position as the item
# following it, we can't use it as a marker position, so increment
# the offset and keep seeking to the previous item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_previous:
# We are on the first page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# The change in direction will introduce a paging artifact,
# where we end up skipping forward a few extra items.
offset = 0
position = self.previous_position
else:
# Use the position from the existing cursor and increment
# its offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.previous_position
cursor = Cursor(offset=offset, reverse=False, position=position)
encoded = _encode_cursor(cursor)
return replace_query_param(self.base_url, self.cursor_query_param, encoded)
def get_previous_link(self):
if not self.has_previous:
return None
if self.cursor and not self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[0], self.ordering)
else:
compare = self.previous_position
offset = 0
for item in self.page:
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
# The item in this position has the same position as the item
# following it, we can't use it as a marker position, so increment
# the offset and keep seeking to the next item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_next:
# We are on the final page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# Use the position from the existing cursor and increment
# its offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.next_position
else:
# The change in direction will introduce a paging artifact,
# where we end up skipping back a few extra items.
offset = 0
position = self.next_position
cursor = Cursor(offset=offset, reverse=True, position=position)
encoded = _encode_cursor(cursor)
return replace_query_param(self.base_url, self.cursor_query_param, encoded)
def get_ordering(self, request, queryset, view):
"""
Return a tuple of strings, that may be used in an `order_by` method.
"""
ordering_filters = [
filter_cls for filter_cls in getattr(view, 'filter_backends', [])
if hasattr(filter_cls, 'get_ordering')
]
if ordering_filters:
# If a filter exists on the view that implements `get_ordering`
# then we defer to that filter to determine the ordering.
filter_cls = ordering_filters[0]
filter_instance = filter_cls()
ordering = filter_instance.get_ordering(request, queryset, view)
assert ordering is not None, (
'Using cursor pagination, but filter class {filter_cls} '
'returned a `None` ordering.'.format(
filter_cls=filter_cls.__name__
)
)
else:
# The default case is to check for an `ordering` attribute
# on this pagination instance.
ordering = self.ordering
assert ordering is not None, (
'Using cursor pagination, but no ordering attribute was declared '
'on the pagination class.'
)
assert isinstance(ordering, (six.string_types, list, tuple)), (
'Invalid ordering. Expected string or tuple, but got {type}'.format(
type=type(ordering).__name__
)
)
if isinstance(ordering, six.string_types):
return (ordering,)
return tuple(ordering)
def _get_position_from_instance(self, instance, ordering):
attr = getattr(instance, ordering[0].lstrip('-'))
return six.text_type(attr)
def get_paginated_response(self, data):
return Response(OrderedDict([
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_html_context(self):
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link()
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
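# A hedged configuration sketch for the cursor style; the ordering field
# should be unique-ish and immutable, such as a creation timestamp:
#     class CreatedCursorPagination(CursorPagination):
#         page_size = 50
#         ordering = '-created'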
|
rohitwaghchaure/erpnext_smart | refs/heads/develop | erpnext/accounts/doctype/fiscal_year/fiscal_year.py | 35 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import getdate
from frappe.model.document import Document
class FiscalYear(Document):
def set_as_default(self):
frappe.db.set_value("Global Defaults", None, "current_fiscal_year", self.name)
frappe.get_doc("Global Defaults").on_update()
# clear cache
frappe.clear_cache()
msgprint(_("{0} is now the default Fiscal Year. Please refresh your browser for the change to take effect.").format(self.name))
def validate(self):
year_start_end_dates = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", (self.name))
if year_start_end_dates:
if getdate(self.year_start_date) != year_start_end_dates[0][0] or getdate(self.year_end_date) != year_start_end_dates[0][1]:
frappe.throw(_("Cannot change Fiscal Year Start Date and Fiscal Year End Date once the Fiscal Year is saved."))
def on_update(self):
# validate year start date and year end date
if getdate(self.year_start_date) > getdate(self.year_end_date):
frappe.throw(_("Fiscal Year Start Date should not be greater than Fiscal Year End Date"))
if (getdate(self.year_end_date) - getdate(self.year_start_date)).days > 366:
frappe.throw(_("Fiscal Year Start Date and Fiscal Year End Date cannot be more than a year apart."))
year_start_end_dates = frappe.db.sql("""select name, year_start_date, year_end_date
from `tabFiscal Year` where name!=%s""", (self.name))
for fiscal_year, ysd, yed in year_start_end_dates:
if (getdate(self.year_start_date) == ysd and getdate(self.year_end_date) == yed) \
and (not frappe.flags.in_test):
frappe.throw(_("Fiscal Year Start Date and Fiscal Year End Date are already set in Fiscal Year {0}").format(fiscal_year))
|
Shuailong/Leetcode | refs/heads/master | solutions/summary-ranges.py | 1 | #!/usr/bin/env python
# encoding: utf-8
"""
summary-ranges.py
Created by Shuailong on 2016-03-08.
https://leetcode.com/problems/summary-ranges/.
"""
class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
if len(nums) == 0:
return []
tuples = []
start_idx = 0
end_idx = 0
for i in range(1, len(nums)):
if nums[i] == nums[i-1] + 1:
end_idx += 1
else:
tuples.append((nums[start_idx], nums[end_idx]))
start_idx = i
end_idx = i
tuples.append((nums[start_idx], nums[end_idx]))
res = []
for t in tuples:
if t[0] != t[1]:
res.append(str(t[0]) + '->' + str(t[1]))
else:
res.append(str(t[0]))
return res
def main():
solution = Solution()
l = [0,1,2,4,5,7]
print solution.summaryRanges(l)
if __name__ == '__main__':
main()
|
TimJay/platformio | refs/heads/develop | platformio/commands/serialports.py | 3 | # Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import json
import sys
import click
from serial.tools import miniterm
from platformio.util import get_serialports
@click.group(short_help="List or Monitor Serial ports")
def cli():
pass
@cli.command("list", short_help="List Serial ports")
@click.option("--json-output", is_flag=True)
def serialports_list(json_output):
if json_output:
click.echo(json.dumps(get_serialports()))
return
for item in get_serialports():
click.secho(item['port'], fg="cyan")
click.echo("----------")
click.echo("Hardware ID: %s" % item['hwid'])
click.echo("Description: %s" % item['description'])
click.echo("")
@cli.command("monitor", short_help="Monitor Serial port")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, default=9600,
help="Set baud rate, default=9600")
@click.option("--parity", default="N",
type=click.Choice(["N", "E", "O", "S", "M"]),
help="Set parity, default=N")
@click.option("--rtscts", is_flag=True,
help="Enable RTS/CTS flow control, default=Off")
@click.option("--xonxoff", is_flag=True,
help="Enable software flow control, default=Off")
@click.option("--rts", default="0", type=click.Choice(["0", "1"]),
help="Set initial RTS line state, default=0")
@click.option("--dtr", default="0", type=click.Choice(["0", "1"]),
help="Set initial DTR line state, default=0")
@click.option("--echo", is_flag=True,
help="Enable local echo, default=Off")
@click.option("--cr", is_flag=True,
help="Do not send CR+LF, send CR only, default=Off")
@click.option("--lf", is_flag=True,
help="Do not send CR+LF, send LF only, default=Off")
@click.option("--debug", "-d", count=True,
help="""Debug received data (escape non-printable chars)
# --debug can be given multiple times:
# 0: just print what is received
# 1: escape non-printable characters, do newlines as unusual
# 2: escape non-printable characters, newlines too
# 3: hex dump everything""")
@click.option("--exit-char", type=int, default=0x1d,
help="ASCII code of special character that is used to exit the "
"application, default=0x1d")
@click.option("--menu-char", type=int, default=0x14,
help="ASCII code of special character that is used to control "
"miniterm (menu), default=0x14")
@click.option("--quiet", is_flag=True,
help="Diagnostics: suppress non-error messages, default=Off")
def serialports_monitor(**kwargs):
sys.argv = sys.argv[3:]
if not kwargs['port']:
for item in get_serialports():
if "VID:PID" in item['hwid']:
sys.argv += ["--port", item['port']]
break
try:
miniterm.main()
except: # pylint: disable=W0702
pass
|
ichuang/sympy | refs/heads/master | sympy/polys/domains/fractionfield.py | 1 | """Implementation of :class:`FractionField` class. """
from sympy.polys.domains.field import Field
from sympy.polys.domains.compositedomain import CompositeDomain
from sympy.polys.domains.characteristiczero import CharacteristicZero
from sympy.polys.polyclasses import DMF
from sympy.polys.polyerrors import GeneratorsNeeded
from sympy.polys.polyutils import dict_from_basic, basic_from_dict, _dict_reorder
class FractionField(Field, CharacteristicZero, CompositeDomain):
"""A class for representing rational function fields. """
dtype = DMF
is_Frac = True
has_assoc_Ring = True
has_assoc_Field = True
def __init__(self, dom, *gens):
if not gens:
raise GeneratorsNeeded("generators not specified")
lev = len(gens) - 1
self.zero = self.dtype.zero(lev, dom)
self.one = self.dtype.one(lev, dom)
self.dom = dom
self.gens = gens
def __str__(self):
return str(self.dom) + '(' + ','.join(map(str, self.gens)) + ')'
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.dom, self.gens))
def __call__(self, a):
"""Construct an element of `self` domain from `a`. """
return DMF(a, self.dom, len(self.gens)-1)
def __eq__(self, other):
"""Returns `True` if two domains are equivalent. """
return self.dtype == other.dtype and self.dom == other.dom and self.gens == other.gens
def __ne__(self, other):
"""Returns `False` if two domains are equivalent. """
return not self.__eq__(other)
def to_sympy(self, a):
"""Convert `a` to a SymPy object. """
return (basic_from_dict(a.numer().to_sympy_dict(), *self.gens) /
basic_from_dict(a.denom().to_sympy_dict(), *self.gens))
def from_sympy(self, a):
"""Convert SymPy's expression to `dtype`. """
p, q = a.as_numer_denom()
num, _ = dict_from_basic(p, gens=self.gens)
den, _ = dict_from_basic(q, gens=self.gens)
for k, v in num.iteritems():
num[k] = self.dom.from_sympy(v)
for k, v in den.iteritems():
den[k] = self.dom.from_sympy(v)
return self((num, den)).cancel()
def from_ZZ_python(K1, a, K0):
"""Convert a Python `int` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_QQ_python(K1, a, K0):
"""Convert a Python `Fraction` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_ZZ_sympy(K1, a, K0):
"""Convert a SymPy `Integer` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_QQ_sympy(K1, a, K0):
"""Convert a SymPy `Rational` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY `mpz` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY `mpq` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_RR_sympy(K1, a, K0):
"""Convert a SymPy `Float` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_RR_mpmath(K1, a, K0):
"""Convert a mpmath `mpf` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_PolynomialRing(K1, a, K0):
"""Convert a `DMF` object to `dtype`. """
if K1.gens == K0.gens:
if K1.dom == K0.dom:
return K1(a.rep)
else:
return K1(a.convert(K1.dom).rep)
else:
monoms, coeffs = _dict_reorder(a.to_dict(), K0.gens, K1.gens)
if K1.dom != K0.dom:
coeffs = [ K1.dom.convert(c, K0.dom) for c in coeffs ]
return K1(dict(zip(monoms, coeffs)))
def from_FractionField(K1, a, K0):
"""
Convert a fraction field element to another fraction field.
Examples
========
>>> from sympy.polys.polyclasses import DMF
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.abc import x
>>> f = DMF(([ZZ(1), ZZ(2)], [ZZ(1), ZZ(1)]), ZZ)
>>> QQx = QQ.frac_field(x)
>>> ZZx = ZZ.frac_field(x)
>>> QQx.from_FractionField(f, ZZx)
DMF(([1/1, 2/1], [1/1, 1/1]), QQ)
"""
if K1.gens == K0.gens:
if K1.dom == K0.dom:
return a
else:
return K1((a.numer().convert(K1.dom).rep,
a.denom().convert(K1.dom).rep))
elif set(K0.gens).issubset(K1.gens):
nmonoms, ncoeffs = _dict_reorder(a.numer().to_dict(), K0.gens, K1.gens)
dmonoms, dcoeffs = _dict_reorder(a.denom().to_dict(), K0.gens, K1.gens)
if K1.dom != K0.dom:
ncoeffs = [ K1.dom.convert(c, K0.dom) for c in ncoeffs ]
dcoeffs = [ K1.dom.convert(c, K0.dom) for c in dcoeffs ]
return K1((dict(zip(nmonoms, ncoeffs)), dict(zip(dmonoms, dcoeffs))))
def get_ring(self):
"""Returns a ring associated with `self`. """
from sympy.polys.domains import PolynomialRing
return PolynomialRing(self.dom, *self.gens)
def poly_ring(self, *gens):
"""Returns a polynomial ring, i.e. `K[X]`. """
raise NotImplementedError('nested domains not allowed')
def frac_field(self, *gens):
"""Returns a fraction field, i.e. `K(X)`. """
raise NotImplementedError('nested domains not allowed')
def is_positive(self, a):
"""Returns True if `a` is positive. """
return self.dom.is_positive(a.numer().LC())
def is_negative(self, a):
"""Returns True if `a` is negative. """
return self.dom.is_negative(a.numer().LC())
def is_nonpositive(self, a):
"""Returns True if `a` is non-positive. """
return self.dom.is_nonpositive(a.numer().LC())
def is_nonnegative(self, a):
"""Returns True if `a` is non-negative. """
return self.dom.is_nonnegative(a.numer().LC())
def numer(self, a):
"""Returns numerator of `a`. """
return a.numer()
def denom(self, a):
"""Returns denominator of `a`. """
return a.denom()
def factorial(self, a):
"""Returns factorial of `a`. """
return self.dtype(self.dom.factorial(a))
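# A small usage sketch (assumed; the repr follows the doctest conventions
# shown in `from_FractionField` above):
#     >>> from sympy.polys.domains import QQ
#     >>> from sympy.abc import x
#     >>> K = QQ.frac_field(x)
#     >>> K.from_sympy(x/(x + 1))
#     DMF(([1/1, 0/1], [1/1, 1/1]), QQ)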
|
bbozhev/flask-test | refs/heads/master | flask/lib/python2.7/site-packages/wtforms/form.py | 35 | import itertools
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from wtforms.compat import with_metaclass, iteritems, itervalues
from wtforms.meta import DefaultMeta
__all__ = (
'BaseForm',
'Form',
)
class BaseForm(object):
"""
Base Form Class. Provides core behaviour like field construction,
validation, and data and error proxying.
"""
def __init__(self, fields, prefix='', meta=DefaultMeta()):
"""
:param fields:
A dict or sequence of 2-tuples of partially-constructed fields.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
:param meta:
A meta instance which is used for configuration and customization
of WTForms behaviors.
"""
if prefix and prefix[-1] not in '-_;:/.':
prefix += '-'
self.meta = meta
self._prefix = prefix
self._errors = None
self._fields = OrderedDict()
if hasattr(fields, 'items'):
fields = fields.items()
translations = self._get_translations()
extra_fields = []
if meta.csrf:
self._csrf = meta.build_csrf(self)
extra_fields.extend(self._csrf.setup_form(self))
for name, unbound_field in itertools.chain(fields, extra_fields):
options = dict(name=name, prefix=prefix, translations=translations)
field = meta.bind_field(self, unbound_field, options)
self._fields[name] = field
def __iter__(self):
"""Iterate form fields in creation order."""
return iter(itervalues(self._fields))
def __contains__(self, name):
""" Returns `True` if the named field is a member of this form. """
return (name in self._fields)
def __getitem__(self, name):
""" Dict-style access to this form's fields."""
return self._fields[name]
def __setitem__(self, name, value):
""" Bind a field to this form. """
self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)
def __delitem__(self, name):
""" Remove a field from this form. """
del self._fields[name]
def _get_translations(self):
"""
.. deprecated:: 2.0
`_get_translations` is being removed in WTForms 3.0, use
`Meta.get_translations` instead.
Override in subclasses to provide alternate translations factory.
Must return an object that provides gettext() and ngettext() methods.
"""
return self.meta.get_translations(self)
def populate_obj(self, obj):
"""
Populates the attributes of the passed `obj` with data from the form's
fields.
:note: This is a destructive operation; any attribute with the same name
as a field will be overridden. Use with caution.
"""
for name, field in iteritems(self._fields):
field.populate_obj(obj, name)
def process(self, formdata=None, obj=None, data=None, **kwargs):
"""
Take form, object data, and keyword arg input and have the fields
process them.
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent.
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param data:
If provided, must be a dictionary of data. This is only used if
`formdata` is empty or not provided and `obj` does not contain
an attribute named the same as the field.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
formdata = self.meta.wrap_formdata(self, formdata)
if data is not None:
# XXX we want to eventually process 'data' as a new entity.
# Temporarily, this can simply be merged with kwargs.
kwargs = dict(data, **kwargs)
for name, field in iteritems(self._fields):
if obj is not None and hasattr(obj, name):
field.process(formdata, getattr(obj, name))
elif name in kwargs:
field.process(formdata, kwargs[name])
else:
field.process(formdata)
def validate(self, extra_validators=None):
"""
Validates the form by calling `validate` on each field.
:param extra_validators:
If provided, is a dict mapping field names to a sequence of
callables which will be passed as extra validators to the field's
`validate` method.
Returns `True` if no errors occur.
"""
self._errors = None
success = True
for name, field in iteritems(self._fields):
if extra_validators is not None and name in extra_validators:
extra = extra_validators[name]
else:
extra = tuple()
if not field.validate(self, extra):
success = False
return success
@property
def data(self):
return dict((name, f.data) for name, f in iteritems(self._fields))
@property
def errors(self):
if self._errors is None:
self._errors = dict((name, f.errors) for name, f in iteritems(self._fields) if f.errors)
return self._errors
class FormMeta(type):
"""
The metaclass for `Form` and any subclasses of `Form`.
`FormMeta`'s responsibility is to create the `_unbound_fields` list, which
is a list of `UnboundField` instances sorted by their order of
instantiation. The list is created at the first instantiation of the form.
If any fields are added/removed from the form, the list is cleared to be
re-generated on the next instantiation.
Any properties which begin with an underscore or are not `UnboundField`
instances are ignored by the metaclass.
"""
def __init__(cls, name, bases, attrs):
type.__init__(cls, name, bases, attrs)
cls._unbound_fields = None
cls._wtforms_meta = None
def __call__(cls, *args, **kwargs):
"""
Construct a new `Form` instance.
Creates the `_unbound_fields` list and the internal `_wtforms_meta`
subclass of the class Meta in order to allow a proper inheritance
hierarchy.
"""
if cls._unbound_fields is None:
fields = []
for name in dir(cls):
if not name.startswith('_'):
unbound_field = getattr(cls, name)
if hasattr(unbound_field, '_formfield'):
fields.append((name, unbound_field))
# We keep the name as the second element of the sort
# to ensure a stable sort.
fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
cls._unbound_fields = fields
# Create a subclass of the 'class Meta' using all the ancestors.
if cls._wtforms_meta is None:
bases = []
for mro_class in cls.__mro__:
if 'Meta' in mro_class.__dict__:
bases.append(mro_class.Meta)
cls._wtforms_meta = type('Meta', tuple(bases), {})
return type.__call__(cls, *args, **kwargs)
def __setattr__(cls, name, value):
"""
Add an attribute to the class, clearing `_unbound_fields` if needed.
"""
if name == 'Meta':
cls._wtforms_meta = None
elif not name.startswith('_') and hasattr(value, '_formfield'):
cls._unbound_fields = None
type.__setattr__(cls, name, value)
def __delattr__(cls, name):
"""
Remove an attribute from the class, clearing `_unbound_fields` if
needed.
"""
if not name.startswith('_'):
cls._unbound_fields = None
type.__delattr__(cls, name)
class Form(with_metaclass(FormMeta, BaseForm)):
"""
Declarative Form base class. Extends BaseForm's core behaviour allowing
fields to be defined on Form subclasses as class attributes.
In addition, form and instance input data are taken at construction time
and passed to `process()`.
"""
Meta = DefaultMeta
def __init__(self, formdata=None, obj=None, prefix='', data=None, meta=None, **kwargs):
"""
:param formdata:
Used to pass data coming from the enduser, usually `request.POST` or
equivalent. formdata should be some sort of request-data wrapper which
can get multiple parameters from the form input, and values are unicode
strings, e.g. a Werkzeug/Django/WebOb MultiDict
:param obj:
If `formdata` is empty or not provided, this object is checked for
attributes matching form field names, which will be used for field
values.
:param prefix:
If provided, all fields will have their name prefixed with the
value.
:param data:
Accept a dictionary of data. This is only used if `formdata` and
`obj` are not present.
:param meta:
If provided, this is a dictionary of values to override attributes
on this form's meta instance.
:param `**kwargs`:
If `formdata` is empty or not provided and `obj` does not contain
an attribute named the same as a field, form will assign the value
of a matching keyword argument to the field, if one exists.
"""
meta_obj = self._wtforms_meta()
if meta is not None and isinstance(meta, dict):
meta_obj.update_values(meta)
super(Form, self).__init__(self._unbound_fields, meta=meta_obj, prefix=prefix)
for name, field in iteritems(self._fields):
# Set all the fields to attributes so that they obscure the class
# attributes with the same names.
setattr(self, name, field)
self.process(formdata, obj, data=data, **kwargs)
def __setitem__(self, name, value):
raise TypeError('Fields may not be added to Form instances, only classes.')
def __delitem__(self, name):
del self._fields[name]
setattr(self, name, None)
def __delattr__(self, name):
if name in self._fields:
self.__delitem__(name)
else:
# This is done for idempotency, if we have a name which is a field,
# we want to mask it by setting the value to None.
unbound_field = getattr(self.__class__, name, None)
if unbound_field is not None and hasattr(unbound_field, '_formfield'):
setattr(self, name, None)
else:
super(Form, self).__delattr__(name)
def validate(self):
"""
Validates the form by calling `validate` on each field, passing any
extra `Form.validate_<fieldname>` validators to the field validator.
"""
extra = {}
for name in self._fields:
inline = getattr(self.__class__, 'validate_%s' % name, None)
if inline is not None:
extra[name] = [inline]
return super(Form, self).validate(extra)
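# A minimal declarative usage sketch (assumed, not part of this module);
# `StringField` and `validators` ship with WTForms:
#     from wtforms import Form, StringField, validators
#     class RegistrationForm(Form):
#         username = StringField('Username', [validators.Length(min=4, max=25)])
#     form = RegistrationForm(formdata)
#     if form.validate():
#         print(form.data)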
|
xbmc/xbmc-rbp | refs/heads/master | tools/Linux/FEH-ARM.py | 47 | import os
import sys
import re
AvailableOutputs = []
Output = None
try:
from qt import *
AvailableOutputs.append("--error-output=Qt")
except:
pass
try:
import pygtk
pygtk.require('2.0')
import gtk
AvailableOutputs.append("--error-output=GTK")
except:
pass
try:
import pygame
import datetime
AvailableOutputs.append("--error-output=SDL")
except:
pass
def error(errorLine):
if Output == "--error-output=Qt":
createQt(errorLine)
elif Output == "--error-output=GTK":
createGTK(errorLine)
elif Output == "--error-output=SDL":
createSDL(errorLine)
else:
print errorLine
exit(1)
def createQt(errorLine):
app = QApplication(sys.argv)
QObject.connect(app, SIGNAL('lastWindowClosed()'), app, SLOT('quit()'))
dialog = QDialog(None, "Error", 0, 0)
dialog.setCaption(dialog.tr("Error"))
layout=QVBoxLayout(dialog)
layout.setSpacing(6)
layout.setMargin(5)
label=QLabel(errorLine, dialog)
layout.addWidget(label)
bnExit=QPushButton("Quit", dialog, "add")
dialog.connect(bnExit, SIGNAL("clicked()"), qApp, SLOT("quit()"))
layout.addWidget(bnExit)
app.setMainWidget(dialog)
dialog.show()
app.exec_loop()
def createGTK(errorLine):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", lambda w: gtk.main_quit())
window.set_title("Error")
vbox = gtk.VBox(False, 5)
window.add(vbox)
window.set_border_width(5)
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_NONE)
label = gtk.Label(errorLine)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
button = gtk.Button("Quit")
button.connect_object("clicked", gtk.Widget.destroy, window)
vbox.pack_start(button, False, False, 0)
window.show_all ()
gtk.main()
def createSDL(errorLine):
pygame.init()
pygame.font.init()
pygame.display.set_caption("Error")
size = width, height = 800, 600
speed = [2, 2]
black = 0, 0, 0
screen = pygame.display.set_mode(size)
font = pygame.font.Font(None, 32)
autoQuit = 10
start = datetime.datetime.now()
finish = datetime.datetime.now()
delta = finish - start
while delta.seconds < autoQuit:
for event in pygame.event.get():
if event.type == pygame.QUIT or event.type == pygame.KEYDOWN:
sys.exit()
screen.fill(black)
place = [200, 200]
for line in errorLine.split('\n'):
text = font.render(line, 1, (255,255,255) )
place[1] += font.size(line)[1]
screen.blit(text, text.get_rect().move(place))
quitline = "Press any button to continue ("
quitline += str(autoQuit - delta.seconds)
quitline += ")"
text = font.render(quitline, 1, (255,255,255) )
screen.blit(text, text.get_rect().move(200,400))
pygame.display.flip()
finish = datetime.datetime.now()
delta = finish - start
def badDirectRendering():
out = os.popen("glxinfo | grep \"direct rendering\"", 'r')
line = out.read()
direct = "Yes" not in line
out.close()
return direct
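# Note: `glxinfo` normally reports a line such as "direct rendering: Yes",
# so any other value is treated here as unaccelerated rendering.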
def badColorDepth():
out = os.popen('xdpyinfo | grep "depth of root"', 'r')
p = re.compile("([0-9]*) planes")
for line in out.readlines():
match = p.search(line)
if (match is not None):
if int(match.group(1)) >= 16:
bitDepth = False
else:
bitDepth = True
out.close()
return bitDepth
def possibleOutput(text):
return text in sys.argv and text in AvailableOutputs
if __name__=="__main__":
if len(AvailableOutputs) > 0:
Output = AvailableOutputs[0]
else:
Output = None
for text in sys.argv:
if possibleOutput(text):
Output = text
if "--no-test" in sys.argv:
exit(0)
if (badDirectRendering()):
error("XBMC needs hardware accelerated OpenGL rendering.\nInstall an appropriate graphics driver.\n\nPlease consult XBMC Wiki for supported hardware\nhttp://xbmc.org/wiki/?title=Supported_hardware")
if (badColorDepth()):
error("XBMC cannot run unless the\nscreen color depth is atleast 24 bit.\n\nPlease reconfigure your screen.")
|
zzicewind/nova | refs/heads/master | nova/virt/hyperv/imagecache.py | 42 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt import images
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
class ImageCache(object):
def __init__(self):
self._pathutils = utilsfactory.get_pathutils()
self._vhdutils = utilsfactory.get_vhdutils()
def _get_root_vhd_size_gb(self, instance):
if instance.old_flavor:
return instance.old_flavor.root_gb
else:
return instance.root_gb
def _resize_and_cache_vhd(self, instance, vhd_path):
vhd_info = self._vhdutils.get_vhd_info(vhd_path)
vhd_size = vhd_info['MaxInternalSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * units.Gi
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
if root_vhd_internal_size < vhd_size:
raise vmutils.HyperVException(
_("Cannot resize the image to a size smaller than the VHD "
"max. internal size: %(vhd_size)s. Requested disk size: "
"%(root_vhd_size)s") %
{'vhd_size': vhd_size, 'root_vhd_size': root_vhd_size}
)
if root_vhd_internal_size > vhd_size:
path_parts = os.path.splitext(vhd_path)
resized_vhd_path = '%s_%s%s' % (path_parts[0],
root_vhd_size_gb,
path_parts[1])
@utils.synchronized(resized_vhd_path)
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
self._pathutils.remove(resized_vhd_path)
copy_and_resize_vhd()
return resized_vhd_path
def get_cached_image(self, context, instance):
image_id = instance.image_ref
base_vhd_dir = self._pathutils.get_base_vhd_dir()
base_vhd_path = os.path.join(base_vhd_dir, image_id)
@utils.synchronized(base_vhd_path)
def fetch_image_if_not_existing():
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = base_vhd_path + '.' + format_ext
if self._pathutils.exists(test_path):
vhd_path = test_path
break
if not vhd_path:
try:
images.fetch(context, image_id, base_vhd_path,
instance.user_id,
instance.project_id)
format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
vhd_path = base_vhd_path + '.' + format_ext.lower()
self._pathutils.rename(base_vhd_path, vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_path):
self._pathutils.remove(base_vhd_path)
return vhd_path
vhd_path = fetch_image_if_not_existing()
if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd':
# Resize the base VHD image as it's not possible to resize a
# differencing VHD. This does not apply to VHDX images.
resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
if resized_vhd_path:
return resized_vhd_path
return vhd_path
def get_image_details(self, context, instance):
image_id = instance.image_ref
return images.get_info(context, image_id)
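# A hedged usage sketch (assumed): the Hyper-V driver would typically call
# something like the following while spawning an instance:
#     cache = ImageCache()
#     root_vhd_path = cache.get_cached_image(context, instance)
# where `context` and `instance` are supplied by the compute manager.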
|
mdanielwork/intellij-community | refs/heads/master | python/helpers/pycharm/pytest_teamcity.py | 35 | import os
import sys
helpers_dir = os.getenv("PYCHARM_HELPERS_DIR", sys.path[0])
if sys.path[0] != helpers_dir:
sys.path.insert(0, helpers_dir)
from tcmessages import TeamcityServiceMessages
from pycharm_run_utils import adjust_sys_path
adjust_sys_path(False)
# Directory where test script exist
CURRENT_DIR_NAME = ""
if sys.argv:
last_arg = sys.argv[-1]
if os.path.isfile(last_arg):
CURRENT_DIR_NAME = os.path.dirname(last_arg)
else:
CURRENT_DIR_NAME = last_arg
if not str(last_arg).endswith(os.sep):
CURRENT_DIR_NAME = last_arg + os.sep
messages = TeamcityServiceMessages(prepend_linebreak=True)
if not "_jb_do_not_call_enter_matrix" in os.environ:
messages.testMatrixEntered()
try:
import pytest
PYVERSION = [int(x) for x in pytest.__version__.split(".")]
except:
import py
PYVERSION = [int(x) for x in py.__version__.split(".")]
def get_name(nodeid):
return nodeid.split("::")[-1]
def fspath_to_url(fspath):
return "file:///" + str(fspath).replace("\\", "/")
if PYVERSION > [1, 4, 0]:
items = {}
current_suite = None
current_file = None
current_file_suite = None
def pytest_collection_finish(session):
messages.testCount(len(session.items))
def pytest_runtest_logstart(nodeid, location):
path = "file://" + os.path.realpath(os.path.join(CURRENT_DIR_NAME, location[0]))
if location[1]:
path += ":" +str(location[1] + 1)
global current_suite, current_file, current_file_suite
current_file = nodeid.split("::")[0]
file_suite = current_file.split("/")[-1]
if file_suite != current_file_suite:
if current_suite:
messages.testSuiteFinished(current_suite)
if current_file_suite:
messages.testSuiteFinished(current_file_suite)
current_file_suite = file_suite
if current_file_suite:
messages.testSuiteStarted(current_file_suite, location=path)
if location[2].find(".") != -1:
suite = location[2].split(".")[0]
name = location[2].split(".")[-1]
else:
name = location[2]
splitted = nodeid.split("::")
try:
ind = splitted.index(name.split("[")[0])
except ValueError:
try:
ind = splitted.index(name)
except ValueError:
ind = 0
if splitted[ind-1] == current_file:
suite = None
else:
suite = current_suite
if suite != current_suite:
if current_suite:
messages.testSuiteFinished(current_suite)
current_suite = suite
if current_suite:
messages.testSuiteStarted(current_suite, location=path)
messages.testStarted(name, location=path)
items[nodeid] = name
def pytest_runtest_logreport(report):
name = items[report.nodeid]
if report.skipped:
messages.testIgnored(name)
elif report.failed: # Duration should be in ms, but report has s
messages.testFailed(name, details=report.longrepr, duration=int(report.duration * 1000))
elif report.when == "call":
messages.testFinished(name, duration=int(report.duration * 1000))
def pytest_sessionfinish(session, exitstatus):
if not messages.number_of_tests and not current_suite and not current_file_suite:
messages.testError("ERROR", "No tests found")
if current_suite:
messages.testSuiteFinished(current_suite)
if current_file_suite:
messages.testSuiteFinished(current_file_suite)
from _pytest.terminal import TerminalReporter
class PycharmTestReporter(TerminalReporter):
def __init__(self, config, file=None):
TerminalReporter.__init__(self, config, file)
def summary_errors(self):
reports = self.getreports('error')
if not reports:
return
for rep in self.stats['error']:
name = rep.nodeid.split("/")[-1]
location = None
if hasattr(rep, 'location'):
location, lineno, domain = rep.location
messages.testSuiteStarted(name, location=fspath_to_url(location))
messages.testStarted("ERROR", location=fspath_to_url(location))
TerminalReporter.summary_errors(self)
messages.testError("ERROR")
messages.testSuiteFinished(name)
else:
def pytest_collectstart(collector):
if collector.name != "()":
messages.testSuiteStarted(collector.name, location=fspath_to_url(collector.fspath))
def pytest_runtest_makereport(item, call):
if call.when == "setup":
fspath, lineno, msg = item.reportinfo()
url = fspath_to_url(fspath)
if lineno: url += ":" + str(lineno)
# messages.testStarted(item.name, location=url)
def pytest_runtest_logreport(report):
if report.item._args:
name = report.item.function.__name__ + str(report.item._args)
else:
name = report.item.name
if report.failed:
messages.testFailed(name, details=report.longrepr)
elif report.skipped:
messages.testIgnored(name)
else:
messages.testFinished(name)
def pytest_collectreport(report):
if report.collector.name != "()":
messages.testSuiteFinished(report.collector.name)
def pytest_itemstart(item, node=None):
if item._args:
name = item.function.__name__ + str(item._args)
else:
name = item.name
if hasattr(item, "_fslineno"):
path = fspath_to_url(item._fslineno[0]) + ":" + str(item._fslineno[1] + 1)
else:
path = fspath_to_url(item.fspath)
messages.testStarted(name, location=path)
try:
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
reporter = PycharmTestReporter(config, sys.stdout)
config.pluginmanager.unregister(name="terminalreporter")
config.pluginmanager.register(reporter, 'terminalreporter')
except AttributeError as e:
sys.stderr.write("Unable to set hookimpl. Some errors may be ignored. Make sure you use PyTest 2.8.0+. Error was {0}".format(e)) |
cvandeplas/plaso | refs/heads/master | plaso/parsers/winreg_plugins/run.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the Run/RunOnce Key plugins for Plaso."""
from plaso.events import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class RunUserPlugin(interface.KeyPlugin):
"""Windows Registry plugin for parsing user specific auto runs."""
NAME = 'winreg_run'
DESCRIPTION = u'Parser for run and run once Registry data.'
REG_TYPE = 'NTUSER'
REG_KEYS = [
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce']
URLS = ['http://msdn.microsoft.com/en-us/library/aa376977(v=vs.85).aspx']
def GetEntries(
self, parser_context, key=None, registry_type=None, **unused_kwargs):
"""Collect the Values under the Run Key and return an event for each one.
Args:
parser_context: A parser context object (instance of ParserContext).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
"""
for value in key.GetValues():
# Ignore the default value.
if not value.name:
continue
# Ignore any value that is empty or that does not contain a string.
if not value.data or not value.DataIsString():
continue
text_dict = {}
text_dict[value.name] = value.data
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict, offset=key.offset,
urls=self.URLS, registry_type=registry_type,
source_append=': Run Key')
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
class RunSoftwarePlugin(RunUserPlugin):
"""Windows Registry plugin for parsing system wide auto runs."""
NAME = 'winreg_run_software'
REG_TYPE = 'SOFTWARE'
REG_KEYS = [
u'\\Microsoft\\Windows\\CurrentVersion\\Run',
u'\\Microsoft\\Windows\\CurrentVersion\\RunOnce',
u'\\Microsoft\\Windows\\CurrentVersion\\RunOnce\\Setup',
u'\\Microsoft\\Windows\\CurrentVersion\\RunServices',
u'\\Microsoft\\Windows\\CurrentVersion\\RunServicesOnce']
winreg.WinRegistryParser.RegisterPlugins([
RunUserPlugin, RunSoftwarePlugin])
|
QinerTech/QinerApps | refs/heads/master | openerp/addons/website_portal_sale/controllers/main.py | 17 | # -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp.addons.website_portal.controllers.main import website_account
class website_account(website_account):
@http.route(['/my/home'], type='http', auth="user", website=True)
def account(self, **kw):
""" Add sales documents to main account page """
response = super(website_account, self).account()
partner = request.env.user.partner_id
res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['sale', 'done'])
])
invoices = res_invoices.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['open', 'paid', 'cancelled'])
])
response.qcontext.update({
'date': datetime.date.today().strftime('%Y-%m-%d'),
'quotations': quotations,
'orders': orders,
'invoices': invoices,
})
return response
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal_sale.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
|
amisrs/one-eighty | refs/heads/master | venv2/lib/python2.7/site-packages/setuptools/windows_support.py | 1015 | import platform
import ctypes
def windows_only(func):
if platform.system() != 'Windows':
return lambda *args, **kwargs: None
return func
@windows_only
def hide_file(path):
"""
Set the hidden attribute on a file or directory.
From http://stackoverflow.com/questions/19622133/
`path` must be text.
"""
__import__('ctypes.wintypes')
SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
SetFileAttributes.restype = ctypes.wintypes.BOOL
FILE_ATTRIBUTE_HIDDEN = 0x02
ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
if not ret:
raise ctypes.WinError()
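# Example usage (assumed): hiding a metadata directory; on non-Windows
# platforms the `windows_only` decorator makes this a no-op.
#     hide_file(u'C:\\path\\to\\.eggs')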
|
Lujeni/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/iam.py | 10 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, user API keys, groups, roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource.
choices: ["user", "group", "role"]
type: str
required: true
name:
description:
- Name of IAM resource to create or identify.
required: true
type: str
new_name:
description:
- When I(state=update), will replace I(name) with I(new_name) on IAM resource.
type: str
new_path:
description:
- When I(state=update), will replace the path with new_path on the IAM resource.
type: str
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
choices: [ "present", "absent", "update" ]
type: str
path:
description:
- When creating or updating, specify the desired path of the resource.
- If I(state=present), it will replace the current path to match what is passed in when they do not match.
default: "/"
type: str
trust_policy:
description:
- The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role.
- Mutually exclusive with I(trust_policy_filepath).
version_added: "2.2"
type: dict
trust_policy_filepath:
description:
- The path to the trust policy document that grants an entity permission to assume the role.
- Mutually exclusive with I(trust_policy).
version_added: "2.2"
type: str
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"]
type: str
key_count:
description:
      - When I(access_key_state=create) it will ensure this quantity of keys is present.
default: 1
type: int
access_key_ids:
description:
- A list of the keys that you want affected by the I(access_key_state) parameter.
type: list
groups:
description:
- A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed.
type: list
password:
description:
- When I(type=user) and either I(state=present) or I(state=update), define the users login password.
- Note that this will always return 'changed'.
type: str
update_password:
default: always
choices: ['always', 'on_create']
description:
- When to update user passwords.
- I(update_password=always) will ensure the password is set to I(password).
- I(update_password=on_create) will only set the password for newly created users.
type: str
notes:
  - 'Currently boto does not support the removal of Managed Policies; the module will error out if your
    user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
loop:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
loop:
- Mario
- Luigi
register: new_groups
- name: Add the pre-existing user jdavila to both groups
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
loop: "{{ new_groups.results }}"
# Example of role with custom trust policy for Lambda service
- name: Create IAM role with custom trust relationship
iam:
iam_type: role
name: AAALambdaTestRole
state: present
trust_policy:
Version: '2012-10-17'
Statement:
- Action: sts:AssumeRole
Effect: Allow
Principal:
Service: lambda.amazonaws.com
'''
RETURN = '''
role_result:
description: the IAM.role dict returned by Boto
type: str
returned: if iam_type=role and state=present
sample: {
"arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
"assume_role_policy_document": "...truncated...",
"create_date": "2017-09-02T14:32:23Z",
"path": "/",
"role_id": "AROAA1B2C3D4E5F6G7H8I",
"role_name": "my-new-role"
}
roles:
description: a list containing the name of the currently defined roles
type: list
returned: if iam_type=role and state=present
sample: [
"my-new-role",
"my-existing-role-1",
"my-existing-role-2",
"my-existing-role-3",
"my-existing-role-...",
]
'''
import json
import traceback
try:
import boto.exception
import boto.iam
import boto.iam.connection
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def _paginate(func, attr):
'''
    Paginates the results from func by continuously passing in
    the returned marker if the results were truncated. This returns
    an iterator over the items in the returned response. `attr` is
    the name of the attribute to iterate over in the response.
'''
finished, marker = False, None
while not finished:
res = func(marker=marker)
for item in getattr(res, attr):
yield item
finished = res.is_truncated == 'false'
if not finished:
marker = res.marker
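# Usage sketch (not from the original file): the list_all_* helpers below are
# the in-repo examples of this pattern; any other truncated boto listing call
# can be consumed the same way, without manual marker bookkeeping:
#
#     for item in _paginate(iam.get_all_users, 'users'):
#         print(item['user_name'])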
def list_all_groups(iam):
return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
def list_all_users(iam):
return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
def list_all_roles(iam):
return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
def list_all_instance_profiles(iam):
return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.
create_access_key_result.
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_dependencies_first(module, iam, name):
changed = False
# try to delete any keys
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
# try to delete login profiles
try:
login_profile = iam.get_login_profiles(name).get_login_profile_response
iam.delete_login_profile(name)
changed = True
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
# try to detach policies
try:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
changed = True
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'must detach all policies first' in error_msg:
            module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                  "that %s has Managed Policies. This is not "
                                                  "currently supported by boto. Please detach the policies "
                                                  "through the console and try again." % name)
module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
# try to deactivate associated MFA devices
try:
mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
for device in mfa_devices:
iam.deactivate_mfa_device(name, device['serial_number'])
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
return changed
def delete_user(module, iam, name):
changed = delete_dependencies_first(module, iam, name)
try:
iam.delete_user(name)
except boto.exception.BotoServerError as ex:
module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
else:
changed = True
return name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                module.fail_json(changed=False, msg=error_msg)
else:
if not updated:
name_change = True
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
if 'Password does not conform to the account password policy' in error_msg:
module.fail_json(changed=False, msg="Password doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
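    # key_state semantics below: 'create' tops the user up to key_count keys,
    # 'active'/'inactive' toggle only the keys listed in access_key_ids, and
    # 'remove' deletes the listed keys.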
new_keys = []
if key_state == 'create':
try:
while key_count > key_qty:
new_keys.append(iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key)
key_qty += 1
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if key_state in ('active', 'inactive'):
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
module.fail_json(msg="Supplied keys not found for %s. "
"Current keys: %s. "
"Supplied key(s): %s" %
(name, current_keys, keys)
)
if key_state == 'remove':
if access_key in current_keys:
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
        final_keys = [ck['access_key_id'] for ck in
                      iam.get_all_access_keys(name).
                      list_access_keys_result.access_key_metadata]
        final_key_status = [ck['status'] for ck in
                            iam.get_all_access_keys(name).
                            list_access_keys_result.access_key_metadata]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed, new_keys
def set_users_groups(module, iam, name, groups, updated=None,
                     new_name=None):
    """ Sets groups for a user. Purges groups not explicitly passed, while
    retaining pre-existing groups that are also in the new list.
    """
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
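# Example (sketch, hypothetical group names): for a user currently in
# ['dev', 'ops'], calling set_users_groups(module, iam, 'jdavila', ['ops', 'qa'])
# removes 'dev', adds 'qa', keeps 'ops', and returns (['ops', 'qa'], True).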
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must delete policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must delete policies first') in error_msg:
                    module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                          "that %s has Managed Policies. This is not "
                                                          "currently supported by boto. Please detach the policies "
                                                          "through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(error_msg))
else:
changed = True
else:
module.fail_json(changed=changed, msg=str(error_msg))
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
changed = False
iam_role_result = None
instance_profile_result = None
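    # A role is paired with a same-named instance profile so it can be attached
    # to EC2 instances; create_role provisions both when they are missing.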
try:
if name not in role_list:
changed = True
iam_role_result = iam.create_role(name,
assume_role_policy_document=trust_policy_doc,
path=path).create_role_response.create_role_result.role
if name not in prof_list:
instance_profile_result = iam.create_instance_profile(name, path=path) \
.create_instance_profile_response.create_instance_profile_result.instance_profile
iam.add_role_to_instance_profile(name, name)
else:
instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
return changed, updated_role_list, iam_role_result, instance_profile_result
def delete_role(module, iam, name, role_list, prof_list):
changed = False
iam_role_result = None
instance_profile_result = None
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam_role_result = iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
                            module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears "
                                                                  "that %s has Managed Policies. This is not "
                                                                  "currently supported by boto. Please detach the policies "
                                                                  "through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
instance_profile_result = iam.delete_instance_profile(name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
return changed, updated_role_list, iam_role_result, instance_profile_result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(required=True),
trust_policy_filepath=dict(default=None, required=False),
trust_policy=dict(type='dict', default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
trust_policy = module.params.get('trust_policy')
trust_policy_filepath = module.params.get('trust_policy_filepath')
key_ids = module.params.get('access_key_ids')
if key_state:
key_state = key_state.lower()
if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
module.fail_json(changed=False, msg="At least one access key has to be defined in order"
" to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
                               module.params.get('access_key_ids') is not None):
        module.fail_json(msg="the IAM type must be user when IAM access keys "
                             "are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
try:
with open(trust_policy_filepath, 'r') as json_data:
trust_policy_doc = json.dumps(json.load(json_data))
except Exception as e:
module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
elif trust_policy:
try:
trust_policy_doc = json.dumps(trust_policy)
except Exception as e:
            module.fail_json(msg=str(e) + ': ' + str(trust_policy))
else:
trust_policy_doc = None
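    # Both branches above normalize the trust policy to a JSON string
    # (trust_policy_doc), so boto receives the same document whether it came
    # inline or from a file.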
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
result = {}
changed = False
try:
orig_group_list = list_all_groups(iam)
orig_user_list = list_all_users(iam)
orig_role_list = list_all_roles(iam)
orig_prof_list = list_all_instance_profiles(iam)
except boto.exception.BotoServerError as err:
module.fail_json(msg=err.message)
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed, new_key = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if new_key:
user_meta = {'access_keys': list(new_key)}
user_meta['access_keys'].extend(
[{'access_key_id': key, 'status': value} for key, value in key_list.items() if
key not in [it['access_key_id'] for it in new_key]])
else:
user_meta = {
'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
            orig_name = name
            if name_change and new_name:
                name = new_name
if isinstance(groups, list):
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
created_keys=new_key, user_meta=user_meta)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
keys=key_list, created_keys=new_key, user_meta=user_meta)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
user_meta=user_meta)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exist. No update made." % name)
elif state == 'absent':
if user_exists:
try:
set_users_groups(module, iam, name, '')
name, changed = delete_user(module, iam, name)
module.exit_json(deleted_user=name, changed=changed)
except Exception as ex:
module.fail_json(changed=changed, msg=str(ex))
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(module=module, iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
module=module, iam=iam, name=name, new_name=new_name,
new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(module=module, iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list, role_result, instance_profile_result = create_role(
module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
elif state == 'absent':
changed, role_list, role_result, instance_profile_result = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list, role_result=role_result,
instance_profile_result=instance_profile_result)
if __name__ == '__main__':
main()
|