| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
briehl/narrative
|
refs/heads/develop
|
src/biokbase/narrative/common/url_config.py
|
2
|
import os
import json
from .util import kbase_env
class Struct:
def __init__(self, **args):
self._urls = {}
self._urls.update(args)
def get_url(self, key):
return self._urls.get(key, None)
def __getattr__(self, key):
return self._urls.get(key, None)
def __str__(self):
return str(self._urls)
def __repr__(self):
return str(self._urls)
try:
nar_path = os.environ["NARRATIVE_DIR"]
config_json = open(os.path.join(nar_path, "src", "config.json")).read()
config = json.loads(config_json)
env = config["config"]
kbase_env.env = env
url_config = config[env] # fun, right?
URLS = Struct(**url_config)
except BaseException:
url_dict = {
"workspace": "https://kbase.us/services/ws/",
"invocation": "https://kbase.us/services/invocation",
"fba": "https://kbase.us/services/KBaseFBAModeling",
"genomeCmp": "https://kbase.us/services/genome_comparison/jsonrpc",
"trees": "https://kbase.us/services/trees",
"log_proxy_port": 32001,
"log_proxy_host": "172.17.42.1",
}
URLS = Struct(**url_dict)
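# Usage sketch (illustrative, not part of the original module): whichever branch ran
# above, URLS exposes the endpoints both as attributes and via get_url(), and unknown
# keys come back as None:
#
#   URLS.workspace            # "https://kbase.us/services/ws/" with the fallback dict
#   URLS.get_url("missing")   # None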
|
jhcepas/npr
|
refs/heads/master
|
ete_dev/evol/utils.py
|
2
|
#!/usr/bin/python
# Author: Francois-Jose Serra
# Creation Date: 2010/04/22 16:05:46
from __future__ import division  # needed so chi_high() below divides an integer df correctly under Python 2
from ete_dev import Tree
from math import exp, floor, log, pi as PI, sin
def get_rooting(tol, seed_species, agename = False):
'''
returns dict of species age for a given TOL and a given seed
**Example:**
::
tol = "((((((((Drosophila melanogaster,(Drosophila simulans,Drosophila secchellia)),(Drosophila yakuba,Drosophila erecta))[&&NHX:name=melanogaster subgroup],Drosophila ananassae)[&&NHX:name=melanogaster group],(Drosophila pseudoobscura,Drosophila persimilis)[&&NHX:name=obscura group])[&&NHX:name=Sophophora Old World],Drosophila willistoni)[&&NHX:name=subgenus Sophophora],(Drosophila grimshawi,(Drosophila virilis,Drosophila mojavensis))[&&NHX:name=subgenus Drosophila])[&&NHX:name=genus Drosophila],(Anopheles gambiae,Aedes aegypti)[&&NHX:name=Culicidae])[&&NHX:name=Arthropoda],Caenorhabditis elegans)[&&NHX:name=Animalia];"
seed = "Drosophila melanogaster"
ROOTING, age2name = get_rooting (tol, seed, True)
ROOTING == {"Aedes aegypti" : 7,
"Anopheles gambiae" : 7,
"Caenorhabditis elegans" : 8,
"Drosophila ananassae" : 3,
"Drosophila erecta" : 2,
"Drosophila grimshawi" : 6,
"Drosophila melanogaster" : 1,
"Drosophila mojavensis" : 6,
"Drosophila persimilis" : 4,
"Drosophila pseudoobscura": 4,
"Drosophila secchellia" : 1,
"Drosophila simulans" : 1,
"Drosophila virilis" : 6,
"Drosophila willistoni" : 5,
"Drosophila yakuba" : 2}
age2name == {1: "Drosophila melanogaster. Drosophila simulans. Drosophila secchellia",
2: "melanogaster subgroup",
3: "melanogaster group",
4: "Sophophora Old World",
5: "subgenus Sophophora",
6: "genus Drosophila",
7: "Arthropoda",
8: "Animalia"}
:argument seed_species: species name
:argument False agename: if True, also returns the inverse dictionary
:returns: ROOTING dictionary with age of each species
'''
tol = Tree (tol)
try:
node = tol.search_nodes (name=seed_species)[0]
except IndexError:
exit ('ERROR: Seed species not found in tree\n')
age = 1
ROOTING = {}
if agename:
age2name = {}
while not node.is_root():
node = node.up
for leaf in node.get_leaf_names():
if agename:
if node.name == 'NoName':
nam = '.'.join (node.get_leaf_names())
else:
nam = node.name
age2name.setdefault (age, nam)
ROOTING.setdefault (leaf, age)
age += 1
if agename:
return ROOTING, age2name
return ROOTING
def translate(sequence):
'''
little function to translate DNA to protein...
from: http://python.genedrift.org/
TODO : inseqgroup functions?
:argument sequence: string
:returns: translated sequence
'''
#dictionary with the genetic code
gencode = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'.', 'TAG':'.',
'TGC':'C', 'TGT':'C', 'TGA':'.', 'TGG':'W',
'---':'-', 'nnn':'x', 'NNN':'X'
}
ambig = {'Y':['A', 'G'], 'R':['C', 'T'], 'M':['G', 'T'], 'K':['A', 'C'], \
'S':['G', 'C'],'W':['A', 'T'], 'V':['C', 'G', 'T'], \
'H':['A', 'G', 'T'], 'D':['A', 'C', 'T'], 'B':['A', 'C', 'G'], \
'N':['A', 'C', 'G', 'T']}
proteinseq = ''
#loop to read DNA sequence in codons, 3 nucleotides at a time
sequence = sequence.upper()
for n in range(0, len(sequence), 3):
#checking to see if the dictionary has the key
try:
proteinseq += gencode[sequence[n:n+3]]
except KeyError:
newcod = []
for nt in sequence[n:n+3]:
if nt in ambig:
newcod.append(ambig[nt])
else :
newcod.append(list (nt))
aa = ''
for nt1 in newcod[0]:
for nt2 in newcod[1]:
for nt3 in newcod[2]:
try:
if aa == '':
aa = gencode[nt1+nt2+nt3]
elif gencode[nt1+nt2+nt3] != aa:
aa = 'X'
break
except KeyError:
aa = 'X'
break
proteinseq += aa
return proteinseq
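# Worked examples (illustrative): plain codons map straight through gencode, stop
# codons become '.', and an ambiguity code collapses to 'X' unless every expansion
# encodes the same amino acid:
#
#   translate('ATGTTTTAA')   # 'MF.'
#   translate('atgccm')      # 'MP'  (CCA/CCC/CCG/CCT all encode proline)
#   translate('ATGWTA')      # 'MX'  (ATA = I but TTA = L, so the codon stays ambiguous)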
# reused from pycogent
ROUND_ERROR = 1e-14
MAXLOG = 7.09782712893383996843E2
MAXLGM = 2.556348e305              # largest argument for which lgam() does not overflow
LS2PI = 0.91893853320467274178     # log(sqrt(2*pi))
LOGPI = 1.14472988584940017414     # log(pi)
big = 4.503599627370496e15
biginv = 2.22044604925031308085e-16
MACHEP = 1.11022302462515654042E-16
def chi_high(x, df):
"""Returns right-hand tail of chi-square distribution (x to infinity).
df, the degrees of freedom, ranges from 1 to infinity (assume integers).
Typically, df is (r-1)*(c-1) for a r by c table.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
x = fix_rounding_error(x)
if x < 0:
raise ValueError("chi_high: x must be >= 0 (got %s)." % x)
if df < 1:
raise ValueError("chi_high: df must be >= 1 (got %s)." % df)
return igamc(df/2, x/2)
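# Quick sanity check (approximate): the 5% critical value of chi-square with one
# degree of freedom is about 3.84, so chi_high(3.84, 1) should come out near 0.05,
# while chi_high(0, df) is exactly 1 for any df.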
def fix_rounding_error(x):
"""If x is almost in the range 0-1, fixes it.
Specifically, if x is between -ROUND_ERROR and 0, returns 0.
If x is between 1 and 1+ROUND_ERROR, returns 1.
"""
if -ROUND_ERROR < x < 0:
return 0
elif 1 < x < 1+ROUND_ERROR:
return 1
else:
return x
def igamc(a,x):
"""Complemented incomplete Gamma integral: see Cephes docs."""
if x <= 0 or a <= 0:
return 1
if x < 1 or x < a:
return 1 - igam(a, x)
ax = a * log(x) - x - lgam(a)
if ax < -MAXLOG: #underflow
return 0
ax = exp(ax)
#continued fraction
y = 1 - a
z = x + y + 1
c = 0
pkm2 = 1
qkm2 = x
pkm1 = x + 1
qkm1 = z * x
ans = pkm1/qkm1
while 1:
c += 1
y += 1
z += 2
yc = y * c
pk = pkm1 * z - pkm2 * yc
qk = qkm1 * z - qkm2 * yc
if qk != 0:
r = pk/qk
t = abs((ans-r)/r)
ans = r
else:
t = 1
pkm2 = pkm1
pkm1 = pk
qkm2 = qkm1
qkm1 = qk
if abs(pk) > big:
pkm2 *= biginv
pkm1 *= biginv
qkm2 *= biginv
qkm1 *= biginv
if t <= MACHEP:
break
return ans * ax
def lgam(x):
"""Natural log of the gamma function: see Cephes docs for details"""
sgngam = 1
if x < -34:
q = -x
w = lgam(q)
p = floor(q)
if p == q:
raise OverflowError("lgam returned infinity.")
i = p
if i & 1 == 0:
sgngam = -1
else:
sgngam = 1
z = q - p
if z > 0.5:
p += 1
z = p - q
z = q * sin(PI * z)
if z == 0:
raise OverflowError("lgam returned infinity.")
z = LOGPI - log(z) - w
return z
if x < 13:
z = 1
p = 0
u = x
while u >= 3:
p -= 1
u = x + p
z *= u
while u < 2:
if u == 0:
raise OverflowError("lgam returned infinity.")
z /= u
p += 1
u = x + p
if z < 0:
sgngam = -1
z = -z
else:
sgngam = 1
if u == 2:
return log(z)
p -= 2
x = x + p
p = x * polevl(x, GB)/polevl(x,GC)
return log(z) + p
if x > MAXLGM:
raise OverflowError("Too large a value of x in lgam.")
q = (x - 0.5) * log(x) - x + LS2PI
if x > 1.0e8:
return q
p = 1/(x*x)
if x >= 1000:
q += (( 7.9365079365079365079365e-4 * p
-2.7777777777777777777778e-3) *p
+ 0.0833333333333333333333) / x
else:
q += polevl(p, GA)/x
return q
def polevl(x, coef):
"""evaluates a polynomial y = C_0 + C_1x + C_2x^2 + ... + C_Nx^N
Coefficients are stored in reverse order, i.e. coef[0] = C_N
"""
result = 0
for c in coef:
result = result * x + c
return result
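# For instance, polevl(2.0, [1.0, 0.0, 3.0]) evaluates 1*2**2 + 0*2 + 3 == 7.0:
# Horner's scheme with the highest-order coefficient first.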
def igam(a, x):
"""Left tail of incomplete gamma function: see Cephes docs for details"""
if x <= 0 or a <= 0:
return 0
if x > 1 and x > a:
return 1 - igamc(a,x)
#Compute x**a * exp(x) / Gamma(a)
ax = a * log(x) - x - lgam(a)
if ax < -MAXLOG: #underflow
return 0.0
ax = exp(ax)
#power series
r = a
c = 1
ans = 1
while 1:
r += 1
c *= x/r
ans += c
if c/ans <= MACHEP:
break
return ans * ax / a
#Coefficients for Gamma follow:
GA = [
8.11614167470508450300E-4,
-5.95061904284301438324E-4,
7.93650340457716943945E-4,
-2.77777777730099687205E-3,
8.33333333333331927722E-2,
]
GB = [
-1.37825152569120859100E3,
-3.88016315134637840924E4,
-3.31612992738871184744E5,
-1.16237097492762307383E6,
-1.72173700820839662146E6,
-8.53555664245765465627E5,
]
GC = [
1.00000000000000000000E0,
-3.51815701436523470549E2,
-1.70642106651881159223E4,
-2.20528590553854454839E5,
-1.13933444367982507207E6,
-2.53252307177582951285E6,
-2.01889141433532773231E6,
]
GP = [
1.60119522476751861407E-4,
1.19135147006586384913E-3,
1.04213797561761569935E-2,
4.76367800457137231464E-2,
2.07448227648435975150E-1,
4.94214826801497100753E-1,
9.99999999999999996796E-1,
]
GQ = [
-2.31581873324120129819E-5,
5.39605580493303397842E-4,
-4.45641913851797240494E-3,
1.18139785222060435552E-2,
3.58236398605498653373E-2,
-2.34591795718243348568E-1,
7.14304917030273074085E-2,
1.00000000000000000320E0,
]
biginv = 2.22044604925031308085e-16
|
40423117/2017springcd_hw
|
refs/heads/gh-pages
|
plugin/liquid_tags/vimeo.py
|
288
|
"""
Vimeo Tag
---------
This implements a Liquid-style vimeo tag for Pelican,
based on the youtube tag which is in turn based on
the jekyll / octopress youtube tag [1]_
Syntax
------
{% vimeo id [width height] %}
Example
-------
{% vimeo 10739054 640 480 %}
Output
------
<div style="width:640px; height:480px;">
<iframe
src="//player.vimeo.com/video/10739054?title=0&byline=0&portrait=0"
width="640" height="480" frameborder="0"
webkitallowfullscreen mozallowfullscreen allowfullscreen>
</iframe>
</div>
[1] https://gist.github.com/jamieowen/2063748
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% vimeo id [width height] %}"
VIMEO = re.compile(r'(\S+)(\s+(\d+)\s(\d+))?')
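# Illustration (not from the original source): VIMEO.search('10739054 640 480').groups()
# gives ('10739054', ' 640 480', '640', '480'), while a bare id gives
# ('10739054', None, None, None) -- hence the width/height fallbacks below.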
@LiquidTags.register('vimeo')
def vimeo(preprocessor, tag, markup):
width = 640
height = 390
vimeo_id = None
match = VIMEO.search(markup)
if match:
groups = match.groups()
vimeo_id = groups[0]
width = groups[2] or width
height = groups[3] or height
if vimeo_id:
vimeo_out = """
<div class="videobox">
<iframe
src="//player.vimeo.com/video/{vimeo_id}?title=0&byline=0&portrait=0"
width="{width}" height="{height}" frameborder="0"
webkitAllowFullScreen mozallowfullscreen allowFullScreen>
</iframe>
</div>
""".format(width=width, height=height, vimeo_id=vimeo_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return vimeo_out
# ---------------------------------------------------
# This import allows vimeo tag to be a Pelican plugin
from liquid_tags import register # noqa
|
adrienbrault/home-assistant
|
refs/heads/dev
|
homeassistant/components/automation/logbook.py
|
5
|
"""Describe logbook events."""
from homeassistant.components.logbook import LazyEventPartialState
from homeassistant.const import ATTR_ENTITY_ID, ATTR_NAME
from homeassistant.core import HomeAssistant, callback
from . import ATTR_SOURCE, DOMAIN, EVENT_AUTOMATION_TRIGGERED
@callback
def async_describe_events(hass: HomeAssistant, async_describe_event): # type: ignore
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event: LazyEventPartialState): # type: ignore
"""Describe a logbook event."""
data = event.data
message = "has been triggered"
if ATTR_SOURCE in data:
message = f"{message} by {data[ATTR_SOURCE]}"
return {
"name": data.get(ATTR_NAME),
"message": message,
"source": data.get(ATTR_SOURCE),
"entity_id": data.get(ATTR_ENTITY_ID),
"context_id": event.context_id,
}
async_describe_event(
DOMAIN, EVENT_AUTOMATION_TRIGGERED, async_describe_logbook_event
)
|
chajadan/dragonfly
|
refs/heads/master
|
dragonfly/apps/family/loader.py
|
5
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Loader
============================================================================
"""
import os.path
import glob
import time
from collections import defaultdict   # needed by the InfoPhase1/InfoPhase2 containers below
import ConfigParser
import dragonfly
from ...error import DragonflyError
from ...grammar.rule_base import Rule
from ...grammar.context import AppContext
from ...actions import ActionBase
from ...parser import Parser
from .dirmon import DirectoryMonitor
from .family import CommandFamily
from .state import StateBase as State
from .state import Transition
from .command import CompoundCommand
from .loader_parser import CallElement
#===========================================================================
# Exception classes.
class LoaderError(DragonflyError):
pass
class SyntaxError(LoaderError):
pass
#===========================================================================
# Container base class similar to C's struct.
class ContainerBase(object):
_attributes = ()
def __init__(self, **kwargs):
for name, default in self._attributes:
if name in kwargs: value = kwargs.pop(name)
elif callable(default): value = default()
else: value = default
setattr(self, name, value)
if kwargs:
names = sorted(kwargs.keys())
raise ValueError("Unknown keyword args: %s" % names)
#===========================================================================
# Input information storage classes.
class InfoPhase1(ContainerBase):
class InfoPhase1Section(ContainerBase):
_attributes = (
("tag", None),
("items", dict),
)
_attributes = (
("sections", lambda: defaultdict(InfoPhase1Section)),
)
class InfoPhase2(ContainerBase):
class InfoPhase2Family(ContainerBase):
_attributes = (
("tag", None),
("name", None),
("description", None),
("states", dict),
("extras", list),
)
class InfoPhase2Choice(ContainerBase):
_attributes = (
("pairs", None),
)
_attributes = (
("families", lambda: defaultdict(InfoPhase2Family)),
("choices", lambda: defaultdict(InfoPhase2Choice)),
)
#===========================================================================
# Config parser class.
class CommandConfigParser(ConfigParser.RawConfigParser):
""" Customized ConfigParser class for parsing command files. """
def optionxform(self, option):
"""
Return option-string unchanged.
This overrides the default behavior of converting the
option-string to lowercase.
"""
return option
#===========================================================================
# Structured container classes for storing config file input.
class ConfigSection(object):
def __init__(self, name, items):
self._section_name = name
self._section_items = items
self._parse_name(self._section_name)
self._parse_items(self._section_items)
def _parse_name(self, name):
pass
def _parse_items(self, items):
pass
def _split_name_type_tag(self, name):
sep_index = name.find(":")
if sep_index == -1:
raise Exception("Invalid section name: %r" % name)
section_type = name[:sep_index].strip().lower()
section_tag = name[sep_index+1:].strip().lower()
if not section_type or not section_tag:
raise Exception("Invalid section name: %r" % name)
return section_type, section_tag
def _split_name_type_family_tag(self, name):
section_type, family_section_tag = self._split_name_type_tag(name)
sep_index = family_section_tag.find(".")
if sep_index == -1:
raise Exception("Invalid section name: %r" % name)
family_tag = family_section_tag[:sep_index].strip().lower()
if not family_tag:
raise Exception("Invalid section name: %r" % name)
return section_type, family_tag, family_section_tag
def _build_items_dict(self, items, error_on_duplicate_key=True):
items_dict = {}
for key, value in items:
if key in items_dict:
if error_on_duplicate_key:
raise Exception("Duplicate key: %r" % key)
continue
items_dict[key] = value
return items_dict
def _unescape_spoken_form(self, escaped):
escaped = escaped.strip()
if escaped[0] != '"' or escaped[-1] != '"':
raise Exception("Invalid spoken form: %r" % escaped)
unescaped = escaped[1:-1]
return unescaped
class FamilySection(ConfigSection):
def _parse_name(self, name):
section_type, family_tag = self._split_name_type_tag(name)
self.tag = family_tag
def _parse_items(self, items):
items_dict = self._build_items_dict(items)
# Parse "name" item, if present.
if "name" in items_dict:
name_string = items_dict.pop("name")
self.name = name_string.strip()
else:
raise Exception("Family section %r missing name item."
% self._section_name)
# Parse "description" item, if present.
if "description" in items_dict:
description_string = items_dict.pop("description")
self.description = description_string.strip()
else:
self.description = None
# Parse "length" item, if present.
if "length" in items_dict:
length_string = items_dict.pop("length")
self.length = int(length_string)
else:
self.length = 4
if items_dict:
raise Exception("Family section contains invalid items:"
" %s" % (sorted(items_dict.keys()),))
class StateSection(ConfigSection):
def _parse_name(self, name):
section_type, family_tag, tag = self._split_name_type_family_tag(name)
self.family_tag = family_tag
self.tag = tag
def _parse_items(self, items):
items_dict = self._build_items_dict(items)
# Parse "name" item, if present.
if "name" in items_dict:
self.name = items_dict.pop("name")
else:
self.name = self.tag
# Parse "include" item, if present.
if "include" in items_dict:
include_string = items_dict.pop("include")
include_names = [s.strip() for s in include_string.split(",")]
self.include = include_names
else:
self.include = ()
# Parse "context" item, if present.
if "context" in items_dict:
context_string = items_dict.pop("context")
self.context = context_string.strip()
else:
raise Exception("State section %r missing context item."
% self._section_name)
# Parse "next" item, if present.
if "next" in items_dict:
next_string = items_dict.pop("next")
self.next = next_string.strip()
else:
self.next = None
self.commands = []
for spoken, spec in items_dict.items():
spoken = self._unescape_spoken_form(spoken)
self.commands.append((spoken, spec))
class ExtrasSection(ConfigSection):
def _parse_name(self, name):
section_type, family_tag = self._split_name_type_tag(name)
self.tag = family_tag
def _parse_items(self, items):
self.extras = []
for item in items:
key, spec = item
self.extras.append((key, spec))
class ChoiceSection(ConfigSection):
def _parse_name(self, name):
section_type, family_tag = self._split_name_type_tag(name)
self.tag = family_tag
def _parse_items(self, items):
self.choices = []
for item in items:
spoken, spec = item
self.choices.append((spoken, spec))
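# Sketch of the command-file layout these section classes expect (an assumption
# inferred from the parsing code above, not taken from project documentation):
#
#   [family: edit]
#   name = Editing commands
#
#   [extras: edit]
#   n = integer(1, 100)
#
#   [state: edit.global]
#   context = notepad
#   "save file" = Key("c-s")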
class InputSpec(object):
pass
class InputInfo(ContainerBase):
_attributes = (
("family_sections", None),
("family_states", None),
("family_extras", None),
("choice_sections", None),
)
#===========================================================================
class Loader(object):
_extras_factories = {}
@classmethod
def register_extras_factory(cls, extra_type, factory):
cls._extras_factories[extra_type] = factory
def __init__(self):
self._dirmon = DirectoryMonitor()
self._loaded = False
self._families = []
def add_directories(self, *directories):
"""
Add one or more directories to be monitored.
Parameter:
- directories (sequence of *str* or *unicode*) --
the new directories to be monitored.
"""
self._dirmon.add_directories(*directories)
#-----------------------------------------------------------------------
def load(self):
""" Load families found in monitored directories. """
self._loaded = True
# Find files to be loaded.
directories = self._dirmon.directories
command_files = self._find_command_files(directories)
library_files = self._find_library_files(directories)
# print "command files", command_files
# print "library files", library_files
# Read and parse command files.
input_spec = self._parse_command_files(command_files)
# Load family objects into engine.
for family in self._families:
family.load()
def unload(self):
""" Unload all commands. """
for family in self._families:
family.unload()
self._loaded = False
def check_for_changes(self):
"""
Check for changes and reload if necessary.
Return *True* if files were reloaded; otherwise *False*.
"""
if not self._loaded or not self._dirmon.is_modified():
return False
self.unload()
self.load()
return True
#-----------------------------------------------------------------------
def _find_command_files(self, directories):
return self._glob_files_in_directories(directories, "*.txt")
def _find_library_files(self, directories):
return self._glob_files_in_directories(directories, "*.py")
def _glob_files_in_directories(self, directories, pattern):
files = []
for directory in directories:
directory_pattern = os.path.join(directory, pattern)
files.extend(glob.glob(directory_pattern))
return files
def _parse_command_files(self, command_files):
# Create config parser and read command files.
config = CommandConfigParser()
for path in command_files:
config.read(path)
dispatchers = {
"family:": FamilySection,
"state:": StateSection,
"extras:": ExtrasSection,
"choice:": ChoiceSection,
}
# Iterate over all input sections and process each according
# to the section name's prefix.
sections = []
for section_name in config.sections():
items = config.items(section_name)
cooked_name = section_name.strip().lower()
section_instance = None
for prefix, section_class in dispatchers.items():
if cooked_name.startswith(prefix):
section_instance = section_class(section_name, items)
break
if not section_instance:
raise Exception("Invalid section: %r" % section_name)
sections.append(section_instance)
# Iterate over all processed section objects and handle each
# according to its type.
family_sections = {}
family_states = {}
family_extras = {}
choice_sections = {}
for section in sections:
if isinstance(section, FamilySection):
family_sections[section.tag] = section
elif isinstance(section, StateSection):
states = family_states.setdefault(section.family_tag, {})
states[section.tag] = section
elif isinstance(section, ExtrasSection):
family_extras[section.tag] = section
elif isinstance(section, ChoiceSection):
choice_sections[section.tag] = section
else:
raise Exception("Invalid section type: %r" % type(section))
input_info = InputInfo(
family_sections = family_sections,
family_states = family_states,
family_extras = family_extras,
choice_sections = choice_sections,
)
# Iterate over all family sections and construct each family.
families = []
for family_section in family_sections.values():
print "constructing family", family_section.tag
family = CommandFamily(name=family_section.name)
extras_section = family_extras[family_section.tag]
print " constructing extras", extras_section.tag, extras_section._section_name
self._build_family_extras(family, extras_section, input_info)
states_by_tag = self._init_family_states(family, family_section, input_info)
for state_section in family_states[family_section.tag].values():
print " constructing state", state_section.tag
# self._build_family_state(family, state_section, states_by_tag, input_info)
families.append(family)
return families
def _build_family_extras(self, family, extras_section, input_info):
element = CallElement()
parser = Parser(element)
for key, spec in extras_section.extras:
print "building extra", key, spec
# Parse the input spec.
output = parser.parse(spec)
output.name = key
print "output:", output
if not output:
raise SyntaxError("Invalid extra %r: %r" % (key, spec))
# Look for an appropriate extras factory and let it
# build the extra element.
if output.function not in self._extras_factories:
raise SyntaxError("Unknown extra type %r in %r" % (output.function, spec))
factory = self._extras_factories[output.function]
extra = factory.build(output)
family.add_extras(extra)
def _init_family_states(self, family, family_section, input_info):
sections = input_info.family_states[family_section.tag].values()
states = []
states_by_tag = {}
for section in sections:
state = State(section.name)
states.append(state)
states_by_tag[section.tag] = state
family.add_states(*states)
return states_by_tag
def _build_family_state_phase2(self, family, state_section, states_by_tag, input_info):
state = family.states[state_section.tag]
context = self._build_state_context(state_section.context)
# state.set_context(context)
for state_name in state_section.include:
included_state = states_by_tag[state_name]
state.include_states(included_state)
extras = []
for spoken, spec in state_section.commands:
command, poststate_name = self._build_state_command(spoken, spec, extras)
if not poststate_name:
poststate = state
else:
poststate = states_by_tag[poststate_name]
transition = Transition(command, state, poststate)
state.add_transitions(transition)
def _build_state_context(self, context_spec):
pass
def _build_state_command(self, spoken_form, command_spec, extras):
# Prepare namespace in which to evaluate command_spec.
namespace = {
# "__file__": path,
# "library": library,
}
for (key, value) in dragonfly.__dict__.items():
if isinstance(value, type) and issubclass(value, ActionBase):
namespace[key] = value
namespace["Repeat"] = dragonfly.Repeat
# Evaluate command specification.
expression = "(%s)" % command_spec
action = eval(expression, namespace, namespace)
# Wrapup action in extras in a compound command.
result = (CompoundCommand(spoken_form, action, extras=extras), None)
return result
#===========================================================================
class ExtrasFactoryBase(object):
def __init__(self):
pass
def build(self, call_info):
pass
#---------------------------------------------------------------------------
class DictationExtrasFactory(ExtrasFactoryBase):
def build(self, call_info):
name = call_info.name
if call_info.arguments:
raise SyntaxError("Invalid arguments for dictation extra: %r"
% (call_info.arguments,))
print "just build", dragonfly.Dictation(name)
return dragonfly.Dictation(name)
Loader.register_extras_factory("dictation", DictationExtrasFactory())
#---------------------------------------------------------------------------
class IntegerExtrasFactory(ExtrasFactoryBase):
def build(self, call_info):
name = call_info.name
min = call_info.arguments.pop().value
max = call_info.arguments.pop().value
if call_info.arguments:
raise SyntaxError("Invalid arguments for integer extra: %r"
% (call_info.arguments,))
print "just build", dragonfly.Integer(name=name, min=min, max=max)
return dragonfly.Integer(name=name, min=min, max=max)
Loader.register_extras_factory("integer", IntegerExtrasFactory())
#===========================================================================
class ExtrasFactoryError(Exception):
pass
class ContextFactoryError(Exception):
pass
class ActionFactoryError(Exception):
pass
class ContextFactoryBase(object):
pass
class ActionFactoryBase(object):
pass
|
jejimenez/django
|
refs/heads/master
|
tests/responses/tests.py
|
226
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
def test_closed(self):
r = HttpResponseBase()
self.assertIs(r.closed, False)
r.close()
self.assertIs(r.closed, True)
def test_write(self):
r = HttpResponseBase()
self.assertIs(r.writable(), False)
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.write('asdf')
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.writelines(['asdf\n', 'qwer\n'])
def test_tell(self):
r = HttpResponseBase()
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
r.tell()
def test_setdefault(self):
"""
HttpResponseBase.setdefault() should not change an existing header
and should be case insensitive.
"""
r = HttpResponseBase()
r['Header'] = 'Value'
r.setdefault('header', 'changed')
self.assertEqual(r['header'], 'Value')
r.setdefault('x-header', 'DefaultValue')
self.assertEqual(r['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
def test_status_code(self):
resp = HttpResponse(status=503)
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_change_status_code(self):
resp = HttpResponse()
resp.status_code = 503
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_reason_phrase(self):
reason = "I'm an anarchist coffee pot on crack."
resp = HttpResponse(status=814, reason=reason)
self.assertEqual(resp.status_code, 814)
self.assertEqual(resp.reason_phrase, reason)
def test_charset_detection(self):
""" HttpResponse should parse charset from content_type."""
response = HttpResponse('ok')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(content_type='text/plain')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
def test_response_content_charset(self):
"""HttpResponse should encode based on charset."""
content = "Café :)"
utf8_content = content.encode(UTF8)
iso_content = content.encode(ISO88591)
response = HttpResponse(utf8_content)
self.assertContains(response, utf8_content)
response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content, content_type='text/plain')
self.assertContains(response, iso_content)
def test_repr(self):
response = HttpResponse(content="Café :)".encode(UTF8), status=201)
expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">'
self.assertEqual(repr(response), expected)
|
ikoula/cloudstack
|
refs/heads/master
|
test/selenium/cspages/dashboard/dashboardpage.py
|
7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import webdriver
from selenium.common.exceptions import *
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.common.action_chains import ActionChains as action
from common import Global_Locators
from cspages.cspage import CloudStackPage
from common.shared import *
class DashboardPage(CloudStackPage):
def __init__(self, browser):
self.browser = browser
self.active_item = ""
self.items = []
@try_except_decor
def get_active_item(self):
self.active_item = ""
lis = self.browser.find_elements_by_xpath("//*[@id='navigation']/ul/li")
for li in lis:
if li.get_attribute('class').find('active') > 0:
self.active_item = li.get_attribute('class')[:(li.get_attribute('class').index(' active'))]
return self.active_item
@try_except_decor
def get_items(self):
lis = self.browser.find_elements_by_xpath("//*[@id='navigation']/ul/li")
for li in lis:
item = li.get_attribute('class')[len('navigation-item '):]
if item.find('active') > 0:
item = item[:(item.index(' active'))]
if item.find('first') > 0:
item = item[:(item.index(' first'))]
if item.find('last') > 0:
item = item[:(item.index(' last'))]
self.items.append(item.lower())
return self.items
# import pdb
# pdb.set_trace()
@try_except_decor
def navigate_to(self, item_name):
if len(self.items) == 0:
self.get_items()
if item_name is None or len(item_name) == 0 or \
item_name.lower() not in self.items or \
(len(self.active_item) > 0 and self.active_item.lower().find(item_name.lower()) > 0):
return
lis = self.browser.find_elements_by_xpath("//*[@id='navigation']/ul/li")
for li in lis:
if li.get_attribute('class').lower().find(item_name.lower()) > 0:
li.click()
time.sleep(3)
return
|
adamheins/stercus
|
refs/heads/master
|
compiler/stercus/lexer.py
|
1
|
#!/usr/bin/env python
""" Stercus Language Lexer """
import argparse
from constants import BRACKETS
def lex(src):
""" Convert the Stercus source code into a list of tokens. """
lexed_src = ''
for char in src:
if char in BRACKETS['ALL']:
lexed_src += ' ' + char + ' '
else:
lexed_src += char
return lexed_src.split()
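# Example (assuming BRACKETS['ALL'] contains '[' and ']'):
#   lex('[print [x]]')   # ['[', 'print', '[', 'x', ']', ']']
# Brackets are padded with spaces so that split() treats each one as its own token.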
def main():
parser = argparse.ArgumentParser()
parser.add_argument('src', help='Stercus source file.')
parser.add_argument('-o', '--output', help='Output file for lexed tokens',
dest='out')
args = parser.parse_args()
with open(args.src, 'r') as f:
src = f.read()
# Output the result.
tokens = lex(src)
if args.out:
with open(args.out, 'w') as f:
f.write(' '.join(tokens))
else:
print ' '.join(tokens)
if __name__ == '__main__':
main()
|
qsnake/py2js
|
refs/heads/master
|
tests/functions/divfloor.py
|
5
|
x = 23423
y = 213
z = x // y
print z
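# Expected output: 109, since 213 * 109 = 23217 <= 23423 < 23430 = 213 * 110.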
|
mrshu/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/sparse_pca.py
|
1
|
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD
import warnings
import numpy as np
from ..utils import check_random_state, array2d
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
"""Sparse Principal Components Analysis (SparsePCA)
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
Number of sparse atoms to extract.
alpha : float,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run.
U_init : array of shape (n_samples, n_components),
Initial values for the loadings for warm restart scenarios.
V_init : array of shape (n_components, n_features),
Initial values for the components for warm restart scenarios.
verbose :
Degree of verbosity of the printed output.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
Sparse components extracted from the data.
`error_` : array
Vector of errors at each iteration.
See also
--------
PCA
MiniBatchSparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
V_init=None, verbose=False, random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.max_iter = max_iter
self.tol = tol
self.method = method
self.n_jobs = n_jobs
self.U_init = U_init
self.V_init = V_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
code_init = self.V_init.T if self.V_init is not None else None
dict_init = self.U_init.T if self.U_init is not None else None
Vt, _, E = dict_learning(X.T, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.method, n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
code_init=code_init,
dict_init=dict_init)
self.components_ = Vt.T
self.error_ = E
return self
def transform(self, X, ridge_alpha=None):
"""Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple linear projection.
Parameters
----------
X: array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
ridge_alpha: float, default: 0.01
Amount of ridge shrinkage to apply in order to improve
conditioning.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
ridge_alpha = self.ridge_alpha if ridge_alpha is None else ridge_alpha
U = ridge_regression(self.components_.T, X.T, ridge_alpha,
solver='dense_cholesky')
s = np.sqrt((U ** 2).sum(axis=0))
s[s == 0] = 1
U /= s
return U
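# Usage sketch (illustrative only, with made-up data; mirrors the docstring above):
#
#   import numpy as np
#   X = np.random.RandomState(0).randn(30, 10)
#   spca = SparsePCA(n_components=3, alpha=1, random_state=0)
#   codes = spca.fit(X).transform(X)   # codes.shape == (30, 3)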
class MiniBatchSparsePCA(SparsePCA):
"""Mini-batch Sparse Principal Components Analysis
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
number of sparse atoms to extract
alpha : int,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
n_iter : int,
number of iterations to perform for each mini batch
callback : callable,
callable that gets invoked every five iterations
batch_size : int,
the number of features to take in each mini batch
verbose :
degree of output the procedure will print
shuffle : boolean,
whether to shuffle the data before splitting it in batches
n_jobs : int,
number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
Sparse components extracted from the data.
`error_` : array
Vector of errors at each iteration.
See also
--------
PCA
SparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
n_iter=100, callback=None, batch_size=3, verbose=False,
shuffle=True, n_jobs=1, method='lars', random_state=None,
chunk_size=None):
if chunk_size is not None:
batch_size = chunk_size
warnings.warn("Parameter chunk_size has been renamed to "
"'batch_size' and will be removed in release 0.14.",
DeprecationWarning, stacklevel=2)
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.n_iter = n_iter
self.callback = callback
self.batch_size = batch_size
self.verbose = verbose
self.shuffle = shuffle
self.n_jobs = n_jobs
self.method = method
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
Vt, _ = dict_learning_online(X.T, n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=True,
dict_init=None, verbose=self.verbose,
callback=self.callback,
batch_size=self.batch_size,
shuffle=self.shuffle,
n_jobs=self.n_jobs, method=self.method,
random_state=self.random_state)
self.components_ = Vt.T
return self
|
JioEducation/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/safe_exec/lazymod.py
|
193
|
"""A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
"""A lazy module proxy."""
def __init__(self, modname):
self.__dict__['__name__'] = modname
self._set_mod(None)
def _set_mod(self, mod):
if mod is not None:
self.__dict__ = mod.__dict__
self.__dict__['_lazymod_mod'] = mod
def _load_mod(self):
__import__(self.__name__)
self._set_mod(sys.modules[self.__name__])
def __getattr__(self, name):
if self.__dict__['_lazymod_mod'] is None:
self._load_mod()
mod = self.__dict__['_lazymod_mod']
if hasattr(mod, name):
return getattr(mod, name)
else:
try:
subname = '%s.%s' % (self.__name__, name)
__import__(subname)
submod = getattr(mod, name)
except ImportError:
raise AttributeError("'module' object has no attribute %r" % name)
self.__dict__[name] = LazyModule(subname)
return self.__dict__[name]
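# Usage sketch (illustrative): the proxy defers the real import until the first
# attribute access.
#
#   os = LazyModule('os')       # nothing imported yet
#   os.path.join('a', 'b')      # first access triggers __import__('os')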
|
VapourApps/va_master
|
refs/heads/master
|
va_master/host_drivers/digitalocean_driver.py
|
1
|
try:
from . import base
from .base import Step, StepResult
except:
import base
from base import Step, StepResult
from base import bytes_to_int, int_to_bytes
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
import digitalocean
from digitalocean import Manager
import tornado.gen
import json, datetime, subprocess, os
PROVIDER_TEMPLATE = '''VAR_PROVIDER_NAME:
minion:
master: VAR_THIS_IP
master_type: str
# The name of the configuration profile to use on said minion
driver: digitalocean
personal_access_token: VAR_TOKEN
ssh_key_names: VAR_SSH_NAME
ssh_key_file: VAR_SSH_FILE
ssh_interface: private
private_networking: True
location: VAR_LOCATION
backups_enabled: True
ipv6: True
'''
# userdata_file: VAR_USERDATA_FILE
PROFILE_TEMPLATE = '''VAR_PROFILE_NAME:
provider: VAR_PROVIDER_NAME
image: VAR_IMAGE
size: VAR_SIZE
minion:
master: VAR_THIS_IP
grains:
role: VAR_ROLE
'''
class DigitalOceanDriver(base.DriverBase):
def __init__(self, provider_name = 'digital_ocean_provider', profile_name = 'digital_ocean_profile', host_ip = '', key_name = 'va_master_key', key_path = '/root/va_master_key', datastore_handler = None):
""" The standard issue init method. Borrows most of the functionality from the BaseDriver init method. """
kwargs = {
'driver_name' : 'digital_ocean',
'provider_template' : PROVIDER_TEMPLATE,
'profile_template' : PROFILE_TEMPLATE,
'provider_name' : provider_name,
'profile_name' : profile_name,
'host_ip' : host_ip,
'key_name' : key_name,
'key_path' : key_path,
'datastore_handler' : datastore_handler
}
#TODO get from api
#[x.name for x in m.get_all_regions()]
self.locations = [u'New York 1', u'Singapore 1', u'London 1', u'New York 3', u'Amsterdam 3', u'Frankfurt 1', u'Toronto 1', u'San Francisco 2', u'Bangalore 1']
super(DigitalOceanDriver, self).__init__(**kwargs)
def get_manager(self, provider):
manager = Manager(token=provider['token'])
self.manager = manager
return manager
@tornado.gen.coroutine
def driver_id(self):
""" Pretty simple. """
raise tornado.gen.Return('digital_ocean')
@tornado.gen.coroutine
def friendly_name(self):
""" Pretty simple """
raise tornado.gen.Return('Digital Ocean')
@tornado.gen.coroutine
def get_steps(self):
""" Digital Ocean requires an access token in order to generate the provider conf. """
steps = yield super(DigitalOceanDriver, self).get_steps()
steps[0].remove_fields(['username', 'password', 'location'])
steps[0].add_fields([
('token', 'Access token', 'str'),
('location', 'Location', 'options'),
])
steps.pop(1)
self.steps = steps
raise tornado.gen.Return(steps)
@tornado.gen.coroutine
def get_networks(self):
""" Gets the networks using the salt-cloud method, at least for the moment. """
networks = yield super(DigitalOceanDriver, self).get_networks()
networks = ['Digital ocean has no networks. ']
raise tornado.gen.Return(networks)
@tornado.gen.coroutine
def get_sec_groups(self):
""" No security groups for digital ocean. """
sec_groups = ['DigitalOcean has no security groups. ']
raise tornado.gen.Return(sec_groups)
@tornado.gen.coroutine
def get_images(self):
""" Gets the images using salt-cloud. """
images = [x.name for x in self.manager.get_images()]
print ('Images are : ', images)
raise tornado.gen.Return(images)
@tornado.gen.coroutine
def get_sizes(self):
""" Gets the sizes using salt-cloud. """
sizes = [x.slug for x in self.manager.get_all_sizes()]
print ('Sizes are : ', sizes)
raise tornado.gen.Return(sizes)
@tornado.gen.coroutine
def get_servers(self, provider):
""" TODO """
manager = self.get_manager(provider)
servers = manager.get_all_droplets()
servers = [
{
'hostname' : x.name,
'ip' : x.ip_address,
'size' : x.size['slug'],
'used_disk' : str(x.size['disk']) + 'GB',
'used_ram' : x.memory,
'used_cpu' : x.vcpus,
'status' : x.status,
'cost' : 0, #TODO find way to calculate costs
'estimated_cost' : 0,
'provider' : provider['provider_name'],
} for x in servers
]
raise tornado.gen.Return(servers)
@tornado.gen.coroutine
def get_provider_status(self, provider):
""" TODO """
try:
self.get_manager(provider)
except Exception as e:
raise tornado.gen.Return({'success' : False, 'message' : e.message})
raise tornado.gen.Return({'success' : True, 'message' : ''})
@tornado.gen.coroutine
def get_provider_billing(self, provider):
#TODO the provider should have some sort of costing mechanism, and we multiply used resources by some price.
total_cost = 0
servers = yield self.get_servers(provider)
servers.append({
'hostname' : 'Other Costs',
'ip' : '',
'size' : '',
'used_disk' : 0,
'used_ram' : 0,
'used_cpu' : 0,
'status' : '',
'cost' : total_cost,
'estimated_cost' : 0,
'provider' : provider['provider_name'],
})
total_memory = sum([x['used_ram'] for x in servers]) * 2**20
total_memory = int_to_bytes(total_memory)
provider['memory'] = total_memory
for server in servers:
server['used_ram'] = int_to_bytes(server['used_ram'] * (2 ** 20))
billing_data = {
'provider' : provider,
'servers' : servers,
'total_cost' : total_cost
}
raise tornado.gen.Return(billing_data)
@tornado.gen.coroutine
def get_provider_data(self, provider, get_servers = True, get_billing = True):
""" TODO """
servers = yield self.get_servers(provider)
provider_usage = {
'max_cpus' : 'maxTotalCores',
'used_cpus' : 'totalCoresUsed',
'free_cpus' : 'maxTotalCores',
'max_ram' : 'maxTotalRAMSize',
'used_ram' : 'totalRAMUsed',
'free_ram' : 'maxTotalRAMSize',
'max_disk' : 'maxTotalVolumeGigabytes',
'used_disk' : 'totalGigabytesUsed',
'free_disk' : 'maxTotalVolumeGigabytes',
'max_servers' : 'maxTotalInstances',
'used_servers' : 'totalInstancesUsed',
'free_servers' : 'maxTotalInstances'
}
provider_data = {
'servers' : servers,
'provider_usage' : provider_usage,
'status' : {'success' : True, 'message': ''}
}
raise tornado.gen.Return(provider_data)
@tornado.gen.coroutine
def get_driver_trigger_functions(self):
conditions = ['domain_full', 'server_can_add_memory', 'server_can_add_cpu']
actions = ['server_new_terminal', 'server_cpu_full', 'server_memory_full', 'server_set_status', 'server_cpu_critical', 'server_cpu_warning', 'server_cpu_ok', 'server_memory_ok', 'server_memory_warning', 'server_memory_critical', 'server_cpu_full_ok', 'server_memory_full_ok']
raise tornado.gen.Return({'conditions' : conditions, 'actions' : actions})
@tornado.gen.coroutine
def server_action(self, provider, server_name, action):
""" Performs server actions using a nova client. """
try:
message = 'Success!'
manager = self.get_manager(provider)
servers = manager.get_all_droplets()
server = [x for x in servers if x.name == server_name][0]
except Exception as e:
import traceback
traceback.print_exc()
raise Exception('Could not get server ' + server_name + '. ' + e.message)
try:
server_action = {
'delete' : server.destroy,
'reboot' : server.power_off,
'start' : server.power_on,
'stop' : server.shutdown,
# 'suspend' : server.suspend,
# 'resume' : server.resume,
}
server_action[action]()
except Exception as e:
import traceback
traceback.print_exc()
raise Exception('Action ' + action + ' was not performed on ' + server_name + '. Reason: ' + e.message)
raise tornado.gen.Return({'success' : True, 'message' : message, 'data' : {}})
@tornado.gen.coroutine
def validate_field_values(self, step_index, field_values):
""" Uses the base driver method, but adds the region tenant and identity_url variables, used in the configurations. """
options = {}
if step_index == -1:
options = {'location' : self.locations}
if step_index == 0:
self.token = field_values['token']
self.get_manager({'token' : self.token})
self.provider_vars['VAR_TOKEN'] = field_values['token']
images = yield self.get_images()
sizes = yield self.get_sizes()
self.field_values['images'] = images
self.field_values['sizes'] = sizes
options = {'image' : images, 'size' : sizes}
if step_index > 0:
step_index += 1
try:
print ('Validating step ', step_index)
step_result = yield super(DigitalOceanDriver, self).validate_field_values(step_index, field_values, options = options)
except:
import traceback
traceback.print_exc()
raise tornado.gen.Return(step_result)
@tornado.gen.coroutine
def create_server(self, host, data):
""" Works properly with the base driver method, but overridden here for bug tracking. """
try:
yield super(DigitalOceanDriver, self).create_minion(host, data)
#Once a server is created, we revert the templates to the originals for creating future servers.
self.profile_template = PROFILE_TEMPLATE
self.provider_template = PROVIDER_TEMPLATE
except:
import traceback
traceback.print_exc()
|
tbinjiayou/Odoo
|
refs/heads/master
|
addons/crm/base_partner_merge.py
|
75
|
#!/usr/bin/env python
from __future__ import absolute_import
from email.utils import parseaddr
import functools
import htmlentitydefs
import itertools
import logging
import operator
import psycopg2
import re
from ast import literal_eval
from openerp.tools import mute_logger
# Validation Library https://pypi.python.org/pypi/validate_email/1.1
from .validate_email import validate_email
import openerp
from openerp.osv import osv, orm
from openerp.osv import fields
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
pattern = re.compile(r"&(\w+?);")
_logger = logging.getLogger('base.partner.merge')
# http://www.php2python.com/wiki/function.html-entity-decode/
def html_entity_decode_char(m, defs=htmlentitydefs.entitydefs):
try:
return defs[m.group(1)]
except KeyError:
return m.group(0)
def html_entity_decode(string):
return pattern.sub(html_entity_decode_char, string)
def sanitize_email(email):
assert isinstance(email, basestring) and email
result = re.subn(r';|/|:', ',',
html_entity_decode(email or ''))[0].split(',')
emails = [parseaddr(email)[1]
for item in result
for email in item.split()]
return [email.lower()
for email in emails
if validate_email(email)]
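# Worked example (illustrative; assumes validate_email() rejects bare words):
#   sanitize_email('John <John@Example.COM>; other')   # ['john@example.com']
# Separators (';', '/', ':') become commas, each chunk goes through parseaddr(),
# is lower-cased, and is kept only if validate_email() accepts it.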
def is_integer_list(ids):
return all(isinstance(i, (int, long)) for i in ids)
class ResPartner(osv.Model):
_inherit = 'res.partner'
_columns = {
'id': fields.integer('Id', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True),
}
class MergePartnerLine(osv.TransientModel):
_name = 'base.partner.merge.line'
_columns = {
'wizard_id': fields.many2one('base.partner.merge.automatic.wizard',
'Wizard'),
'min_id': fields.integer('MinID'),
'aggr_ids': fields.char('Ids', required=True),
}
_order = 'min_id asc'
class MergePartnerAutomatic(osv.TransientModel):
"""
The idea behind this wizard is to create a list of potential partners to
merge. We use two objects: the first one is the wizard for the end-user,
and the second one contains the partner list to merge.
"""
_name = 'base.partner.merge.automatic.wizard'
_columns = {
# Group by
'group_by_email': fields.boolean('Email'),
'group_by_name': fields.boolean('Name'),
'group_by_is_company': fields.boolean('Is Company'),
'group_by_vat': fields.boolean('VAT'),
'group_by_parent_id': fields.boolean('Parent Company'),
'state': fields.selection([('option', 'Option'),
('selection', 'Selection'),
('finished', 'Finished')],
'State',
readonly=True,
required=True),
'number_group': fields.integer("Group of Contacts", readonly=True),
'current_line_id': fields.many2one('base.partner.merge.line', 'Current Line'),
'line_ids': fields.one2many('base.partner.merge.line', 'wizard_id', 'Lines'),
'partner_ids': fields.many2many('res.partner', string='Contacts'),
'dst_partner_id': fields.many2one('res.partner', string='Destination Contact'),
'exclude_contact': fields.boolean('A user associated to the contact'),
'exclude_journal_item': fields.boolean('Journal Items associated to the contact'),
'maximum_group': fields.integer("Maximum of Group of Contacts"),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(MergePartnerAutomatic, self).default_get(cr, uid, fields, context)
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
partner_ids = context['active_ids']
res['state'] = 'selection'
res['partner_ids'] = partner_ids
res['dst_partner_id'] = self._get_ordered_partner(cr, uid, partner_ids, context=context)[-1].id
return res
_defaults = {
'state': 'option'
}
def get_fk_on(self, cr, table):
q = """ SELECT cl1.relname as table,
att1.attname as column
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND cl2.relname = %s
AND att2.attname = 'id'
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND con.contype = 'f'
"""
return cr.execute(q, (table,))
def _update_foreign_keys(self, cr, uid, src_partners, dst_partner, context=None):
_logger.debug('_update_foreign_keys for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
# find the many2one relation to a partner
proxy = self.pool.get('res.partner')
self.get_fk_on(cr, 'res_partner')
        # skip the wizard's own tables (base_partner_merge_*)
for table, column in cr.fetchall():
if 'base_partner_merge_' in table:
continue
partner_ids = tuple(map(int, src_partners))
query = "SELECT column_name FROM information_schema.columns WHERE table_name LIKE '%s'" % (table)
cr.execute(query, ())
columns = []
for data in cr.fetchall():
if data[0] != column:
columns.append(data[0])
query_dic = {
'table': table,
'column': column,
'value': columns[0],
}
if len(columns) <= 1:
                # the column may be part of a unique key: update row by row and
                # skip rows that would collide with an existing one
query = """
UPDATE "%(table)s" as ___tu
SET %(column)s = %%s
WHERE
%(column)s = %%s AND
NOT EXISTS (
SELECT 1
FROM "%(table)s" as ___tw
WHERE
%(column)s = %%s AND
___tu.%(value)s = ___tw.%(value)s
)""" % query_dic
for partner_id in partner_ids:
cr.execute(query, (dst_partner.id, partner_id, dst_partner.id))
else:
try:
with mute_logger('openerp.sql_db'), cr.savepoint():
query = 'UPDATE "%(table)s" SET %(column)s = %%s WHERE %(column)s IN %%s' % query_dic
cr.execute(query, (dst_partner.id, partner_ids,))
if column == proxy._parent_name and table == 'res_partner':
query = """
WITH RECURSIVE cycle(id, parent_id) AS (
SELECT id, parent_id FROM res_partner
UNION
SELECT cycle.id, res_partner.parent_id
FROM res_partner, cycle
WHERE res_partner.id = cycle.parent_id AND
cycle.id != cycle.parent_id
)
SELECT id FROM cycle WHERE id = parent_id AND id = %s
"""
cr.execute(query, (dst_partner.id,))
except psycopg2.Error:
# updating fails, most likely due to a violated unique constraint
                    # keeping a record with a nonexistent partner_id is useless, better to delete it
query = 'DELETE FROM %(table)s WHERE %(column)s = %%s' % query_dic
cr.execute(query, (partner_id,))
def _update_reference_fields(self, cr, uid, src_partners, dst_partner, context=None):
_logger.debug('_update_reference_fields for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
def update_records(model, src, field_model='model', field_id='res_id', context=None):
proxy = self.pool.get(model)
if proxy is None:
return
domain = [(field_model, '=', 'res.partner'), (field_id, '=', src.id)]
ids = proxy.search(cr, openerp.SUPERUSER_ID, domain, context=context)
try:
with mute_logger('openerp.sql_db'), cr.savepoint():
return proxy.write(cr, openerp.SUPERUSER_ID, ids, {field_id: dst_partner.id}, context=context)
except psycopg2.Error:
# updating fails, most likely due to a violated unique constraint
                # keeping a record with a nonexistent partner_id is useless, better to delete it
return proxy.unlink(cr, openerp.SUPERUSER_ID, ids, context=context)
update_records = functools.partial(update_records, context=context)
for partner in src_partners:
update_records('calendar', src=partner, field_model='model_id.model')
update_records('ir.attachment', src=partner, field_model='res_model')
update_records('mail.followers', src=partner, field_model='res_model')
update_records('mail.message', src=partner)
update_records('marketing.campaign.workitem', src=partner, field_model='object_id.model')
update_records('ir.model.data', src=partner)
proxy = self.pool['ir.model.fields']
domain = [('ttype', '=', 'reference')]
record_ids = proxy.search(cr, openerp.SUPERUSER_ID, domain, context=context)
for record in proxy.browse(cr, openerp.SUPERUSER_ID, record_ids, context=context):
try:
proxy_model = self.pool[record.model]
field_type = proxy_model._columns[record.name].__class__._type
except KeyError:
# unknown model or field => skip
continue
if field_type == 'function':
continue
for partner in src_partners:
domain = [
(record.name, '=', 'res.partner,%d' % partner.id)
]
model_ids = proxy_model.search(cr, openerp.SUPERUSER_ID, domain, context=context)
values = {
record.name: 'res.partner,%d' % dst_partner.id,
}
proxy_model.write(cr, openerp.SUPERUSER_ID, model_ids, values, context=context)
def _update_values(self, cr, uid, src_partners, dst_partner, context=None):
_logger.debug('_update_values for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
columns = dst_partner._columns
def write_serializer(column, item):
if isinstance(item, browse_record):
return item.id
else:
return item
values = dict()
for column, field in columns.iteritems():
if field._type not in ('many2many', 'one2many') and not isinstance(field, fields.function):
for item in itertools.chain(src_partners, [dst_partner]):
if item[column]:
values[column] = write_serializer(column, item[column])
values.pop('id', None)
parent_id = values.pop('parent_id', None)
dst_partner.write(values)
if parent_id and parent_id != dst_partner.id:
try:
dst_partner.write({'parent_id': parent_id})
except (osv.except_osv, orm.except_orm):
_logger.info('Skip recursive partner hierarchies for parent_id %s of partner: %s', parent_id, dst_partner.id)
@mute_logger('openerp.osv.expression', 'openerp.models')
def _merge(self, cr, uid, partner_ids, dst_partner=None, context=None):
proxy = self.pool.get('res.partner')
partner_ids = proxy.exists(cr, uid, list(partner_ids), context=context)
if len(partner_ids) < 2:
return
if len(partner_ids) > 3:
raise osv.except_osv(_('Error'), _("For safety reasons, you cannot merge more than 3 contacts together. You can re-open the wizard several times if needed."))
if openerp.SUPERUSER_ID != uid and len(set(partner.email for partner in proxy.browse(cr, uid, partner_ids, context=context))) > 1:
raise osv.except_osv(_('Error'), _("All contacts must have the same email. Only the Administrator can merge contacts with different emails."))
if dst_partner and dst_partner.id in partner_ids:
src_partners = proxy.browse(cr, uid, [id for id in partner_ids if id != dst_partner.id], context=context)
else:
ordered_partners = self._get_ordered_partner(cr, uid, partner_ids, context)
dst_partner = ordered_partners[-1]
src_partners = ordered_partners[:-1]
_logger.info("dst_partner: %s", dst_partner.id)
if openerp.SUPERUSER_ID != uid and self._model_is_installed(cr, uid, 'account.move.line', context=context) and \
self.pool.get('account.move.line').search(cr, openerp.SUPERUSER_ID, [('partner_id', 'in', [partner.id for partner in src_partners])], context=context):
raise osv.except_osv(_('Error'), _("Only the destination contact may be linked to existing Journal Items. Please ask the Administrator if you need to merge several contacts linked to existing Journal Items."))
call_it = lambda function: function(cr, uid, src_partners, dst_partner,
context=context)
call_it(self._update_foreign_keys)
call_it(self._update_reference_fields)
call_it(self._update_values)
_logger.info('(uid = %s) merged the partners %r with %s', uid, list(map(operator.attrgetter('id'), src_partners)), dst_partner.id)
dst_partner.message_post(body='%s %s'%(_("Merged with the following partners:"), ", ".join('%s<%s>(ID %s)' % (p.name, p.email or 'n/a', p.id) for p in src_partners)))
for partner in src_partners:
partner.unlink()
def clean_emails(self, cr, uid, context=None):
"""
        Clean the email addresses of the partners. If an email field contains a
        minimum of two addresses, the system creates one new partner per extra
        address, copying the information of the original partner and storing a
        single cleaned address in each email field.
"""
context = dict(context or {})
proxy_model = self.pool['ir.model.fields']
field_ids = proxy_model.search(cr, uid, [('model', '=', 'res.partner'),
('ttype', 'like', '%2many')],
context=context)
fields = proxy_model.read(cr, uid, field_ids, context=context)
reset_fields = dict((field['name'], []) for field in fields)
proxy_partner = self.pool['res.partner']
context['active_test'] = False
ids = proxy_partner.search(cr, uid, [], context=context)
        fields = ['name', 'var', 'partner_id', 'is_company', 'email']
partners = proxy_partner.read(cr, uid, ids, fields, context=context)
partners.sort(key=operator.itemgetter('id'))
partners_len = len(partners)
_logger.info('partner_len: %r', partners_len)
for idx, partner in enumerate(partners):
if not partner['email']:
continue
percent = (idx / float(partners_len)) * 100.0
_logger.info('idx: %r', idx)
_logger.info('percent: %r', percent)
try:
emails = sanitize_email(partner['email'])
head, tail = emails[:1], emails[1:]
email = head[0] if head else False
proxy_partner.write(cr, uid, [partner['id']],
{'email': email}, context=context)
for email in tail:
values = dict(reset_fields, email=email)
proxy_partner.copy(cr, uid, partner['id'], values,
context=context)
except Exception:
_logger.exception("There is a problem with this partner: %r", partner)
raise
return True
def close_cb(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
def _generate_query(self, fields, maximum_group=100):
sql_fields = []
for field in fields:
if field in ['email', 'name']:
sql_fields.append('lower(%s)' % field)
elif field in ['vat']:
sql_fields.append("replace(%s, ' ', '')" % field)
else:
sql_fields.append(field)
group_fields = ', '.join(sql_fields)
filters = []
for field in fields:
if field in ['email', 'name', 'vat']:
filters.append((field, 'IS NOT', 'NULL'))
criteria = ' AND '.join('%s %s %s' % (field, operator, value)
for field, operator, value in filters)
text = [
"SELECT min(id), array_agg(id)",
"FROM res_partner",
]
if criteria:
text.append('WHERE %s' % criteria)
text.extend([
"GROUP BY %s" % group_fields,
"HAVING COUNT(*) >= 2",
"ORDER BY min(id)",
])
if maximum_group:
text.extend([
"LIMIT %s" % maximum_group,
])
return ' '.join(text)
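    # Example of the SQL this helper can produce (a sketch for fields=['email'],
    # maximum_group=100; whitespace differs from the real single-line output):
    #   SELECT min(id), array_agg(id) FROM res_partner
    #   WHERE email IS NOT NULL
    #   GROUP BY lower(email)
    #   HAVING COUNT(*) >= 2
    #   ORDER BY min(id) LIMIT 100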
def _compute_selected_groupby(self, this):
group_by_str = 'group_by_'
group_by_len = len(group_by_str)
fields = [
key[group_by_len:]
for key in self._columns.keys()
if key.startswith(group_by_str)
]
groups = [
field
for field in fields
if getattr(this, '%s%s' % (group_by_str, field), False)
]
if not groups:
raise osv.except_osv(_('Error'),
_("You have to specify a filter for your selection"))
return groups
def next_cb(self, cr, uid, ids, context=None):
"""
        Don't compute anything
"""
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def _get_ordered_partner(self, cr, uid, partner_ids, context=None):
partners = self.pool.get('res.partner').browse(cr, uid, list(partner_ids), context=context)
ordered_partners = sorted(sorted(partners,
key=operator.attrgetter('create_date'), reverse=True),
key=operator.attrgetter('active'), reverse=True)
return ordered_partners
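    # Ordering sketch (illustrative ids): with partners created in the order 10, 11, 12
    # and all active, the result is [12, 11, 10]; callers then take
    # ordered_partners[-1] (id 10, the oldest record) as the default merge
    # destination. Inactive partners sort after active ones because of the
    # outer, stable sort on 'active'.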
def _next_screen(self, cr, uid, this, context=None):
this.refresh()
values = {}
if this.line_ids:
# in this case, we try to find the next record.
current_line = this.line_ids[0]
current_partner_ids = literal_eval(current_line.aggr_ids)
values.update({
'current_line_id': current_line.id,
'partner_ids': [(6, 0, current_partner_ids)],
'dst_partner_id': self._get_ordered_partner(cr, uid, current_partner_ids, context)[-1].id,
'state': 'selection',
})
else:
values.update({
'current_line_id': False,
'partner_ids': [],
'state': 'finished',
})
this.write(values)
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def _model_is_installed(self, cr, uid, model, context=None):
proxy = self.pool.get('ir.model')
domain = [('model', '=', model)]
return proxy.search_count(cr, uid, domain, context=context) > 0
def _partner_use_in(self, cr, uid, aggr_ids, models, context=None):
"""
        Return True if any partner of this group is already referenced in one of
        the selected models.
"""
for model, field in models.iteritems():
proxy = self.pool.get(model)
domain = [(field, 'in', aggr_ids)]
if proxy.search_count(cr, uid, domain, context=context):
return True
return False
def compute_models(self, cr, uid, ids, context=None):
"""
        Compute the models (and the partner field in each) used to exclude
        some partners from the merge.
"""
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
models = {}
if this.exclude_contact:
models['res.users'] = 'partner_id'
if self._model_is_installed(cr, uid, 'account.move.line', context=context) and this.exclude_journal_item:
models['account.move.line'] = 'partner_id'
return models
def _process_query(self, cr, uid, ids, query, context=None):
"""
        Execute the SELECT query and store the resulting groups as lines of this wizard.
"""
proxy = self.pool.get('base.partner.merge.line')
this = self.browse(cr, uid, ids[0], context=context)
models = self.compute_models(cr, uid, ids, context=context)
cr.execute(query)
counter = 0
for min_id, aggr_ids in cr.fetchall():
if models and self._partner_use_in(cr, uid, aggr_ids, models, context=context):
continue
values = {
'wizard_id': this.id,
'min_id': min_id,
'aggr_ids': aggr_ids,
}
proxy.create(cr, uid, values, context=context)
counter += 1
values = {
'state': 'selection',
'number_group': counter,
}
this.write(values)
_logger.info("counter: %s", counter)
def start_process_cb(self, cr, uid, ids, context=None):
"""
Start the process.
        * Compute the selected groups (including duplicates)
        * If the user has selected the 'exclude_XXX' fields, skip those partners.
"""
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
groups = self._compute_selected_groupby(this)
query = self._generate_query(groups, this.maximum_group)
self._process_query(cr, uid, ids, query, context=context)
return self._next_screen(cr, uid, this, context)
def automatic_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
this = self.browse(cr, uid, ids[0], context=context)
this.start_process_cb()
this.refresh()
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def parent_migration_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
query = """
SELECT
min(p1.id),
array_agg(DISTINCT p1.id)
FROM
res_partner as p1
INNER join
res_partner as p2
ON
p1.email = p2.email AND
p1.name = p2.name AND
(p1.parent_id = p2.id OR p1.id = p2.parent_id)
WHERE
p2.id IS NOT NULL
GROUP BY
p1.email,
p1.name,
CASE WHEN p1.parent_id = p2.id THEN p2.id
ELSE p1.id
END
HAVING COUNT(*) >= 2
ORDER BY
min(p1.id)
"""
self._process_query(cr, uid, ids, query, context=context)
for line in this.line_ids:
partner_ids = literal_eval(line.aggr_ids)
self._merge(cr, uid, partner_ids, context=context)
line.unlink()
cr.commit()
this.write({'state': 'finished'})
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL,
parent_id = NULL
WHERE
parent_id = id
""")
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
def update_all_process_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
# WITH RECURSIVE cycle(id, parent_id) AS (
# SELECT id, parent_id FROM res_partner
# UNION
# SELECT cycle.id, res_partner.parent_id
# FROM res_partner, cycle
# WHERE res_partner.id = cycle.parent_id AND
# cycle.id != cycle.parent_id
# )
# UPDATE res_partner
# SET parent_id = NULL
# WHERE id in (SELECT id FROM cycle WHERE id = parent_id);
this = self.browse(cr, uid, ids[0], context=context)
self.parent_migration_process_cb(cr, uid, ids, context=None)
list_merge = [
{'group_by_vat': True, 'group_by_email': True, 'group_by_name': True},
# {'group_by_name': True, 'group_by_is_company': True, 'group_by_parent_id': True},
# {'group_by_email': True, 'group_by_is_company': True, 'group_by_parent_id': True},
# {'group_by_name': True, 'group_by_vat': True, 'group_by_is_company': True, 'exclude_journal_item': True},
# {'group_by_email': True, 'group_by_vat': True, 'group_by_is_company': True, 'exclude_journal_item': True},
# {'group_by_email': True, 'group_by_is_company': True, 'exclude_contact': True, 'exclude_journal_item': True},
# {'group_by_name': True, 'group_by_is_company': True, 'exclude_contact': True, 'exclude_journal_item': True}
]
for merge_value in list_merge:
id = self.create(cr, uid, merge_value, context=context)
self.automatic_process_cb(cr, uid, [id], context=context)
cr.execute("""
UPDATE
res_partner
SET
is_company = NULL
WHERE
parent_id IS NOT NULL AND
is_company IS NOT NULL
""")
# cr.execute("""
# UPDATE
# res_partner as p1
# SET
# is_company = NULL,
# parent_id = (
# SELECT p2.id
# FROM res_partner as p2
# WHERE p2.email = p1.email AND
# p2.parent_id != p2.id
# LIMIT 1
# )
# WHERE
# p1.parent_id = p1.id
# """)
return self._next_screen(cr, uid, this, context)
def merge_cb(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
context = dict(context or {}, active_test=False)
this = self.browse(cr, uid, ids[0], context=context)
partner_ids = set(map(int, this.partner_ids))
if not partner_ids:
this.write({'state': 'finished'})
return {
'type': 'ir.actions.act_window',
'res_model': this._name,
'res_id': this.id,
'view_mode': 'form',
'target': 'new',
}
self._merge(cr, uid, partner_ids, this.dst_partner_id, context=context)
if this.current_line_id:
this.current_line_id.unlink()
return self._next_screen(cr, uid, this, context)
def auto_set_parent_id(self, cr, uid, ids, context=None):
assert is_integer_list(ids)
        # select graded partners, ordered by their number of open/paid invoices (most first)
partner_treated = ['@gmail.com']
cr.execute(""" SELECT p.id, p.email
FROM res_partner as p
LEFT JOIN account_invoice as a
ON p.id = a.partner_id AND a.state in ('open','paid')
WHERE p.grade_id is NOT NULL
GROUP BY p.id
ORDER BY COUNT(a.id) DESC
""")
re_email = re.compile(r".*@")
for id, email in cr.fetchall():
# check email domain
email = re_email.sub("@", email or "")
if not email or email in partner_treated:
continue
partner_treated.append(email)
            # don't update the partners if more than one of them has invoices
cr.execute(""" SELECT *
FROM res_partner as p
WHERE p.id != %s AND p.email LIKE '%%%s' AND
EXISTS (SELECT * FROM account_invoice as a WHERE p.id = a.partner_id AND a.state in ('open','paid'))
""" % (id, email))
if len(cr.fetchall()) > 1:
_logger.info("%s MORE OF ONE COMPANY", email)
continue
# to display changed values
cr.execute(""" SELECT id,email
FROM res_partner
WHERE parent_id != %s AND id != %s AND email LIKE '%%%s'
""" % (id, id, email))
_logger.info("%r", cr.fetchall())
            # update the parent_id of the remaining matching partners
cr.execute(""" UPDATE res_partner
SET parent_id = %s
WHERE id != %s AND email LIKE '%%%s'
""" % (id, id, email))
return False
|
andyraib/data-storage
|
refs/heads/master
|
python_scripts/env/lib/python3.6/site-packages/matplotlib/tri/__init__.py
|
23
|
"""
Unstructured triangular grid functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .triangulation import *
from .tricontour import *
from .tritools import *
from .trifinder import *
from .triinterpolate import *
from .trirefine import *
from .tripcolor import *
from .triplot import *
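# Minimal usage sketch for this package (not part of the original file); it
# assumes matplotlib and numpy are importable:
#   import numpy as np
#   import matplotlib.pyplot as plt
#   import matplotlib.tri as mtri
#   x = np.random.rand(20); y = np.random.rand(20)
#   tri = mtri.Triangulation(x, y)   # Delaunay triangulation of the points
#   plt.triplot(tri)                 # draw the triangle edges
#   plt.show()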
|
elviscat/DBDS
|
refs/heads/master
|
DBDS_Step1.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# DBDS_Step1.py
# Author: Elvis Hsin-Hui Wu
# Date June 25, 2014
# Batch execute pyDruids.py and store analysis results in designated folder
# Usage: python DBDS_Step1.py -s example.nex
from Bio.Nexus import Nexus
from Bio import SeqIO
import os, sys
def delFolderContent(folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
print e
def readNexFile(fileName):
seq = {}
if fileName != '':
# read data set file, requires biopython
handle = open(fileName, 'r')
if fileName.endswith('nex') or fileName.endswith('nexus'):
seq = SeqIO.to_dict(SeqIO.parse(handle, 'nexus'))
elif fileName.endswith('phy') or fileName.endswith('phylip'):
seq = SeqIO.to_dict(SeqIO.parse(handle, 'phylip'))
elif fileName.endswith('fasta'):
seq = SeqIO.to_dict(SeqIO.parse(handle, 'fasta'))
handle.close()
return seq
def DruidsWrapper(fileName, outputFolderName, seq):
nchar = len(seq[seq.keys()[0]])
ntaxa = len(seq.keys())
print 'nchar:' + str(nchar)
print 'ntaxa:' + str(ntaxa)
    # windowSize = min(windowSize, nchar/2) # limit window size to half the data set length
# model_array = ["V", "H", "C", "V,H", "V,C", "H,C", "V,H,C"] # w/ combine model
model_array = ["V", "H", "C", "GC"]
for i in range(3,(nchar/2),3):
# for i in range(3,45,3): # Just for testing
#print i
for j in model_array:
# example
# os.system("python pyDruids.py -f " + fileName + " -w 24 -a V_H_C -o 111/test_window_24_V_H_C")
# os.system("python pyDruids.py -f " + fileName + " -w 21 -a V_H_C -o 111/test_window_21_V_H_C")
command = "python pyDruids.py -f " + fileName + " -w " + str(i) + " -a " + j + " -o " + outputFolderName + "/" + outputFolderName + "_test_window_"+ str(i) + "_" + j
# print command
os.system(command)
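# Example of a command DruidsWrapper builds (illustrative; pyDruids.py and its
# flags are taken from the strings above, with fileName='example.nex',
# outputFolderName='example', i=24, j='GC'):
#   python pyDruids.py -f example.nex -w 24 -a GC -o example/example_test_window_24_GC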
if __name__ == "__main__":
# Usage: python DBDS_Step1.py -s example.nex
fileName = ''
outputFolderName = ''
arguments = sys.argv
argumentsLength = len(arguments)
if argumentsLength == 2 and arguments[1] == '-h':
print 'Welcome to help!'
print 'Simple usage of this script: python DBDS_Step1.py -s example.nex'
elif argumentsLength == 3:
for i in range(1, argumentsLength-1):
if arguments[i] == '-s' and arguments[i+1] != '' and arguments[i+1] != '-s':
fileName = arguments[i+1]
outputFolderName = fileName.split(".")[0]
if not os.path.exists(outputFolderName):
os.mkdir(outputFolderName)
else:
delFolderContent(outputFolderName)
if len(readNexFile(fileName)) > 0:
# print len(readNexFile(fileName))
print 'Load sequence file ...'
seq = readNexFile(fileName)
DruidsWrapper(fileName, outputFolderName, seq)
else:
                        print 'Can\'t load the sequence, probably wrong format. Supported formats: nexus, phylip, fasta.'
else:
print 'Wrong parameter setting, please follow the usage: python DBDS_Step1.py -s example.nex'
else:
print 'Wrong parameter setting, please follow the usage: python DBDS_Step1.py -s example.nex'
|
pelya/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Lib/lib2to3/fixes/fix_xreadlines.py
|
53
|
"""Fix "for x in f.xreadlines()" -> "for x in f".
This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixXreadlines(fixer_base.BaseFix):
PATTERN = """
power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
|
power< any+ trailer< '.' no_call='xreadlines' > >
"""
def transform(self, node, results):
no_call = results.get("no_call")
if no_call:
no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
else:
node.replace([x.clone() for x in results["call"]])
|
tisba/bigcouch
|
refs/heads/master
|
couchjs/scons/scons-local-2.0.1/SCons/Tool/hpcc.py
|
61
|
"""SCons.Tool.hpcc
Tool-specific initialization for HP aCC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpcc.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import cc
def generate(env):
"""Add Builders and construction variables for aCC & cc to an Environment."""
cc.generate(env)
env['CXX'] = 'aCC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS +Z')
def exists(env):
return env.Detect('aCC')
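# Minimal usage sketch (not part of the original tool; assumes a SConstruct run
# on an HP-UX host with aCC installed):
#   import SCons.Environment
#   env = SCons.Environment.Environment(tools=['hpcc'])
#   # env['CXX'] is now 'aCC' and shared-object builds get the '+Z' flag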
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gm2211/vpnAlfredWorkflow
|
refs/heads/develop
|
src/alp/request/requests/packages/charade/codingstatemachine.py
|
206
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
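# Usage sketch (illustrative; 'sm' stands for one of the model dicts shipped
# with this package, e.g. a UTF-8 state machine model from .mbcssm):
#   machine = CodingStateMachine(sm)
#   for byte in data:
#       state = machine.next_state(byte)
#       # compare 'state' against the constants in .constants (eStart, eError, ...)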
|
desecho/hoa
|
refs/heads/master
|
hoa_project/hoa_project/settings.py
|
1
|
# Django settings for hoa_project project.
import os, django
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Moscow'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ru'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
DJANGO_DIR = os.path.dirname(os.path.realpath(django.__file__))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = BASE_DIR + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
DJANGO_DIR + '/contrib/admin/static',
BASE_DIR + '/static'
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
try:
from secret import SECRET_KEY
except ImportError:
def gen_secret_key():
here = lambda x: os.path.abspath(os.path.join(os.path.dirname(__file__), x))
print "Django's SECRET_KEY not found, generating new."
from random import choice
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
f = open(here('secret.py'), 'w')
f.write('''# Make this unique, and don't share it with anybody.\nSECRET_KEY = '%s'\n''' % secret_key)
f.close()
gen_secret_key()
from secret import SECRET_KEY
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'hoa_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'hoa_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
BASE_DIR + '/templates'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'nested_inlines',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'hoa',
'bootstrap_toolkit',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
FORMAT_DATE = '%d.%m.%Y'
START_DATE = '01.01.1900'
|
mjmeyer2013/is210-week-05-warmup
|
refs/heads/master
|
tests/test_smoke.py
|
245
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Smoke test for test suite."""
# Import Python libs
import unittest
class SmokeTestCase(unittest.TestCase):
"""Test cases to ensure that the test suite is operational."""
def test_true(self):
"""Tests that True is True."""
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
hyperized/ansible
|
refs/heads/devel
|
hacking/build_library/build_ansible/command_plugins/generate_man.py
|
68
|
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import pathlib
import sys
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = pathlib.Path(__file__).parents[4] / 'docs/templates/man.j2'
# from https://www.python.org/dev/peps/pep-0257/
def trim_docstring(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
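# Quick illustration of trim_docstring (hypothetical input, per PEP 257):
#   trim_docstring('First line.\n        Second line.\n    ')
#   -> 'First line.\nSecond line.'
# Common leading indentation is removed from all lines after the first, and
# leading/trailing blank lines are stripped.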
def get_options(optlist):
''' get actual options '''
opts = []
for opt in optlist:
res = {
'desc': opt.help,
'options': opt.option_strings
}
if isinstance(opt, argparse._StoreAction):
res['arg'] = opt.dest.upper()
elif not res['options']:
continue
opts.append(res)
return opts
def dedupe_groups(parser):
action_groups = []
for action_group in parser._action_groups:
found = False
for a in action_groups:
if a._actions == action_group._actions:
found = True
break
if not found:
action_groups.append(action_group)
return action_groups
def get_option_groups(option_parser):
groups = []
for action_group in dedupe_groups(option_parser)[1:]:
group_info = {}
group_info['desc'] = action_group.description
group_info['options'] = action_group._actions
group_info['group_obj'] = action_group
groups.append(group_info)
return groups
def opt_doc_list(parser):
''' iterate over options lists '''
results = []
for option_group in dedupe_groups(parser)[1:]:
results.extend(get_options(option_group._actions))
results.extend(get_options(parser._actions))
return results
# def opts_docs(cli, name):
def opts_docs(cli_class_name, cli_module_name):
''' generate doc structure from options '''
cli_name = 'ansible-%s' % cli_module_name
if cli_module_name == 'adhoc':
cli_name = 'ansible'
    # With no action/subcommand
# shared opts set
# instantiate each cli and ask its options
cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
cli = cli_klass([cli_name])
# parse the common options
try:
cli.init_parser()
except Exception:
pass
# base/common cli info
docs = {
'cli': cli_module_name,
'cli_name': cli_name,
'usage': cli.parser.format_usage(),
'short_desc': cli.parser.description,
'long_desc': trim_docstring(cli.__doc__),
'actions': {},
'content_depth': 2,
}
option_info = {'option_names': [],
'options': [],
'groups': []}
    for extras in ('ARGUMENTS',):
if hasattr(cli, extras):
docs[extras.lower()] = getattr(cli, extras)
common_opts = opt_doc_list(cli.parser)
groups_info = get_option_groups(cli.parser)
shared_opt_names = []
for opt in common_opts:
shared_opt_names.extend(opt.get('options', []))
option_info['options'] = common_opts
option_info['option_names'] = shared_opt_names
option_info['groups'].extend(groups_info)
docs.update(option_info)
# now for each action/subcommand
# force populate parser with per action options
def get_actions(parser, docs):
        # use class attrs, not the attrs on an instance (not that it matters here...)
try:
subparser = parser._subparsers._group_actions[0].choices
except AttributeError:
subparser = {}
depth = 0
for action, parser in subparser.items():
action_info = {'option_names': [],
'options': [],
'actions': {}}
# docs['actions'][action] = {}
# docs['actions'][action]['name'] = action
action_info['name'] = action
action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
# docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip()
action_doc_list = opt_doc_list(parser)
uncommon_options = []
for action_doc in action_doc_list:
# uncommon_options = []
option_aliases = action_doc.get('options', [])
for option_alias in option_aliases:
if option_alias in shared_opt_names:
continue
# TODO: use set
if option_alias not in action_info['option_names']:
action_info['option_names'].append(option_alias)
if action_doc in action_info['options']:
continue
uncommon_options.append(action_doc)
action_info['options'] = uncommon_options
depth = 1 + get_actions(parser, action_info)
docs['actions'][action] = action_info
return depth
action_depth = get_actions(cli.parser, docs)
docs['content_depth'] = action_depth + 1
docs['options'] = opt_doc_list(cli.parser)
return docs
class GenerateMan(Command):
name = 'generate-man'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(name=cls.name,
description='Generate cli documentation from cli docstrings')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE, help="path to jinja2 template")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
default='/tmp/', help="Output directory for rst files")
parser.add_argument("-f", "--output-format", action="store", dest="output_format",
default='man',
help="Output format for docs (the default 'man' or 'rst')")
parser.add_argument('cli_modules', help='CLI module name(s)', metavar='MODULE_NAME', nargs='*')
@staticmethod
def main(args):
template_file = args.template_file
template_path = os.path.expanduser(template_file)
template_dir = os.path.abspath(os.path.dirname(template_path))
template_basename = os.path.basename(template_file)
output_dir = os.path.abspath(args.output_dir)
output_format = args.output_format
cli_modules = args.cli_modules
        # various cli parsing helpers check sys.argv when the 'args' passed in are [],
        # so strip any extra args so the cli modules don't try to parse them and emit warnings
sys.argv = [sys.argv[0]]
allvars = {}
output = {}
cli_list = []
cli_bin_name_list = []
# for binary in os.listdir('../../lib/ansible/cli'):
for cli_module_name in cli_modules:
binary = os.path.basename(os.path.expanduser(cli_module_name))
if not binary.endswith('.py'):
continue
elif binary == '__init__.py':
continue
cli_name = os.path.splitext(binary)[0]
if cli_name == 'adhoc':
cli_class_name = 'AdHocCLI'
# myclass = 'AdHocCLI'
output[cli_name] = 'ansible.1.rst.in'
cli_bin_name = 'ansible'
else:
# myclass = "%sCLI" % libname.capitalize()
cli_class_name = "%sCLI" % cli_name.capitalize()
output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
cli_bin_name = 'ansible-%s' % cli_name
# FIXME:
allvars[cli_name] = opts_docs(cli_class_name, cli_name)
cli_bin_name_list.append(cli_bin_name)
cli_list = allvars.keys()
doc_name_formats = {'man': '%s.1.rst.in',
'rst': '%s.rst'}
for cli_name in cli_list:
# template it!
env = Environment(loader=FileSystemLoader(template_dir))
template = env.get_template(template_basename)
# add rest to vars
tvars = allvars[cli_name]
tvars['cli_list'] = cli_list
tvars['cli_bin_name_list'] = cli_bin_name_list
tvars['cli'] = cli_name
if '-i' in tvars['options']:
print('uses inventory')
manpage = template.render(tvars)
filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
update_file_if_different(filename, to_bytes(manpage))
|
mayankcu/Django-social
|
refs/heads/master
|
venv/Lib/encodings/cp1250.py
|
593
|
""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1250',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\ufffe' # 0x83 -> UNDEFINED
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
u'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
u'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u02c7' # 0xA1 -> CARON
u'\u02d8' # 0xA2 -> BREVE
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u02db' # 0xB2 -> OGONEK
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
u'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
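# Round-trip sketch using the codec defined by this module (illustrative,
# Python 2 string semantics):
#   import codecs
#   codecs.lookup('cp1250')        # resolves to getregentry() above
#   u'\u0159'.encode('cp1250')     # -> '\xf8' (LATIN SMALL LETTER R WITH CARON)
#   '\xf8'.decode('cp1250')        # -> u'\u0159'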
|
mcannamela/fooskill
|
refs/heads/master
|
stochastics/tests/__init__.py
|
26
|
__author__ = 'michael'
|
kevin-hannegan/vps-droplet
|
refs/heads/master
|
website/lib/python2.7/site-packages/werkzeug/local.py
|
159
|
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import copy
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident, imported from thread or _thread depending on the Python version.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
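# Behaviour sketch for Local (illustrative, in the style of the doctests used
# elsewhere in this module): attributes are stored per ident (thread or
# greenlet), so two contexts never see each other's values:
#   loc = Local()
#   loc.request = 'something'     # visible only in the current thread/greenlet
#   release_local(loc)            # drops this context's storage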
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
    They can be force-released by using a :class:`LocalManager` or with
    the :func:`release_local` function, but the correct way is to pop the
    item from the stack after use. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clear all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
    The difference to `make_middleware` is that the function passed
    will have all the attributes copied from the inner application
    (name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
    The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object()) # noqa
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
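# A minimal usage sketch added for illustration; it only exercises behaviour
# documented in the docstrings above (Local attribute storage, LocalStack
# push/pop/top and release_local) and is not part of upstream werkzeug.
# It runs only when this module is executed directly.
if __name__ == '__main__':
    demo = Local()
    demo.user = 'alice'
    user_proxy = demo('user')            # LocalProxy bound to demo.user
    assert str(user_proxy) == 'alice'
    stack = LocalStack()
    stack.push(42)
    assert stack.top == 42
    assert stack.pop() == 42 and stack.top is None
    release_local(demo)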
|
jphire/solmuhub-testbed
|
refs/heads/master
|
create-results/profiler.py
|
1
|
import os
import sys
import json
import numpy as np
import scipy as sp
import scipy.stats
'''
This script computes the averages and confidence intervals of latency, CPU and memory usage.
Results are saved in a timestamped folder.
Example usage:
$ python avg.py 512 5
Where 512 represents the image size and 5 is the total number of nodes, including the controller node.
'''
def mean_confidence_interval(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
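# Illustrative note (not part of the original script): for a sample such as
# data = [1.0, 2.0, 3.0] this returns the mean (2.0) together with the lower
# and upper bounds of the 95% confidence interval around that mean, computed
# from the standard error and Student's t distribution.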
def run(filename, nodes, size):
profile = {}
means = {}
with open(filename) as file:
for line in file:
data = json.loads(line)['profiler']['data']
latency = json.loads(line)['profiler']['latency']
for key, val in data.items():
if not key in profile:
profile[key] = []
for value in val:
profile[key].append(value['time'])
for tag, val in profile.items():
means[tag] = mean_confidence_interval(val)
return {'means':means}
latest = 0
# size must be given for the cli
size = str(sys.argv[1])
sizes = ['256', '512', '1024']
nodeCount = sys.argv[2]
names = ['profile']
s = '-'
# Get latest logs' directory name
for dirname, dirnames, filenames in os.walk('../logs/profiler'):
for subdirname in dirnames:
tmp = int(subdirname)
latest = max(latest, tmp)
latestPath = os.path.join(dirname, str(latest))
results_path = '../results/' + str(latest)
if not os.path.exists(results_path):
os.makedirs(results_path)
for name in names:
outfile = os.path.join(results_path, name + "-" + str(size) + ".profile")
try:
os.remove(outfile)
    except OSError:
        pass  # no previous output file to remove
tags = ['feed_fetched', 'after_data_fetch', 'after_data_map', 'execution_end', 'piece_response_latency', 'dist_response_latency', 'after_reducer', 'before_sending_response', ]
for name in names:
for node in range(0, int(nodeCount)):
filename = s.join([str(node), 'node', size])
ret = run(os.path.join(latestPath, filename), str(node), str(size))['means']
outfile = os.path.join(results_path, str(node) + "-" + str(size) + ".profile")
with open(outfile, 'a') as out:
for tag in tags:
if (tag in ret.keys()):
print "\t".join([tag, str(ret[tag][0]), str(ret[tag][1]), str(ret[tag][2]), "0.6", "\n"])
out.write("\t".join([str(ret[tag][0]), str(ret[tag][1]), str(ret[tag][2]), "0.6", "\n"]))
|
interhui/py_task
|
refs/heads/master
|
task/task.py
|
3
|
# coding=utf-8
'''
task
@author: Huiyugeng
'''
import time
from job import job_listener
class Task():
def __init__(self, name, job, trigger, job_listener = None):
self.serial = str(time.time()) + name
self.name = name
self.job = job
self.job_listener = job_listener
self.trigger = trigger
self.status = 0 # -1:exception, 0:stop, 1:running, 2:pause
def get_name(self):
return self.name
def get_serial(self):
return self.serial
def set_status(self, status):
self.status = status
if self.job_listener != None and isinstance(self.job_listener, job_listener.JobListener):
self.job_listener.status_changed(status)
def get_status(self):
return self.status
def get_job(self):
return self.job
def get_job_listener(self):
return self.job_listener
def get_trigger(self):
return self.trigger
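# A hedged usage sketch (not part of the original module). `MyJob` and
# `my_trigger` are hypothetical stand-ins for objects provided elsewhere in
# this package; only the Task interface defined above is assumed:
#
#     task = Task('backup', MyJob(), my_trigger)
#     task.set_status(1)             # notifies the JobListener, if one is set
#     assert task.get_status() == 1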
|
ftrader-bitcoinabc/bitcoin-abc
|
refs/heads/master
|
test/functional/test_framework/test_node.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
from enum import Enum
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
import urllib.parse
import collections
from .authproxy import JSONRPCException
from .messages import COIN, CTransaction, FromHex
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
p2p_port,
rpc_url,
wait_until,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, host, rpc_port, p2p_port, timewait, bitcoind,
bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.host = host
self.rpc_port = rpc_port
self.p2p_port = p2p_port
self.name = "testnode-{}".format(i)
self.rpc_timeout = timewait
self.binary = bitcoind
if not os.path.isfile(self.binary):
raise FileNotFoundError(
"Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOIND=<path/to/bitcoind> {}".format(self.binary, sys.argv[0]))
self.coverage_dir = coverage_dir
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the default list
# below.
# For those callers that need more flexibility, they can access the
# default args using the provided facilities.
# Note that common args are set in the config file (see
# initialize_datadir)
self.extra_args = extra_args
self.default_args = [
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=" + self.name,
"-noprinttoconsole",
]
if not os.path.isfile(bitcoin_cli):
raise FileNotFoundError(
"Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOINCLI=<path/to/bitcoin-cli> {}".format(bitcoin_cli, sys.argv[0]))
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.relay_fee_cache = None
self.log = logging.getLogger('TestFramework.node{}'.format(i))
# Whether to kill the node when this object goes away
self.cleanup_on_exit = True
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
AddressKeyPair = collections.namedtuple(
'AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z',
'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg',
'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP',
'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR',
'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws',
'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi',
'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6',
'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8',
'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg',
'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node {}] {}".format(self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc is not None, self._node_msg(
"Error: RPC not initialized")
assert self.rpc_connected, self._node_msg(
"Error: No RPC connection")
return getattr(self.rpc, name)
def clear_default_args(self):
self.default_args.clear()
def extend_default_args(self, args):
self.default_args.extend(args)
def remove_default_args(self, args):
for rm_arg in args:
            # Remove all occurrences of rm_arg in self.default_args:
            #  - if the arg is a flag (-flag), then the names must match
            #  - if the arg is a value (-key=value) then the name must start
            #    with "-key=" (the '=' char is to avoid removing a "-key_suffix"
            #    arg when "-key" is the argument to remove).
self.default_args = [def_arg for def_arg in self.default_args
if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')]
def start(self, extra_args=None, stdout=None,
stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(
dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(
dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are
# written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(
[self.binary] + self.default_args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(rpc_url(self.datadir, self.host, self.rpc_port),
self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC
# connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to bitcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc is not None, self._node_msg(
"Error: RPC not initialized")
assert self.rpc_connected, self._node_msg(
"Error: RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError(
"Unexpected stderr {} != {}".format(stderr, expected_stderr))
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code ({}) when stopping".format(return_code))
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log,
flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(
self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout,
stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoind failed to start: {}'.format(e))
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr,
flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def relay_fee(self, cached=True):
if not self.relay_fee_cache or not cached:
self.relay_fee_cache = self.getnetworkinfo()["relayfee"]
return self.relay_fee_cache
def calculate_fee(self, tx):
""" Estimate the necessary fees (in sats) for an unsigned CTransaction assuming:
- the current relayfee on node
- all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr
- all inputs currently unsigned (empty scriptSig)
"""
billable_size_estimate = tx.billable_size()
# Add some padding for signatures / public keys
# 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes)
billable_size_estimate += len(tx.vin) * 107
# relay_fee gives a value in BCH per kB.
return int(self.relay_fee() / 1000 * billable_size_estimate * COIN)
def calculate_fee_from_txid(self, txid):
ctx = FromHex(CTransaction(), self.getrawtransaction(txid))
return self.calculate_fee(ctx)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg)
else:
return str(arg)
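# Illustrative mappings for arg_to_cli, derived from the code above (not part
# of the upstream test framework):
#   arg_to_cli(True)          -> "true"
#   arg_to_cli({"fee": 0.1})  -> '{"fee": 0.1}'
#   arg_to_cli(21000000)      -> "21000000"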
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value)
for (key, value) in kwargs.items()]
assert not (
pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: {}".format(command))
process = subprocess.Popen(p_args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(
r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(
returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except json.JSONDecodeError:
return cli_stdout.rstrip("\n")
|
0k/odoo
|
refs/heads/master
|
addons/website/models/ir_ui_view.py
|
5
|
# -*- coding: utf-8 -*-
import copy
from lxml import etree, html
from openerp import SUPERUSER_ID, tools
from openerp.addons.website.models import website
from openerp.http import request
from openerp.osv import osv, fields
class view(osv.osv):
_inherit = "ir.ui.view"
_columns = {
'page': fields.boolean("Whether this view is a web page template (complete)"),
'website_meta_title': fields.char("Website meta title", size=70, translate=True),
'website_meta_description': fields.text("Website meta description", size=160, translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
'customize_show': fields.boolean("Show As Optional Inherit"),
'website_id': fields.many2one('website',ondelete='cascade', string="Website"),
}
_sql_constraints = [
('key_website_id_uniq', 'unique(key, website_id)',
'Key must be unique per website.'),
]
_defaults = {
'page': False,
'customize_show': False,
}
def _view_obj(self, cr, uid, view_id, context=None):
if isinstance(view_id, basestring):
return self.pool['ir.model.data'].xmlid_to_object(
cr, uid, view_id, raise_if_not_found=True, context=context
)
elif isinstance(view_id, (int, long)):
return self.browse(cr, uid, view_id, context=context)
# assume it's already a view object (WTF?)
return view_id
# Returns all views (called and inherited) related to a view
# Used by translation mechanism, SEO and optional templates
def _views_get(self, cr, uid, view_id, options=True, bundles=False, context=None, root=True):
""" For a given view ``view_id``, should return:
* the view itself
* all views inheriting from it, enabled or not
- but not the optional children of a non-enabled child
* all views called from it (via t-call)
"""
try:
view = self._view_obj(cr, uid, view_id, context=context)
except ValueError:
            # Shall we log that?
return []
while root and view.inherit_id:
view = view.inherit_id
result = [view]
node = etree.fromstring(view.arch)
xpath = "//t[@t-call]"
if bundles:
xpath += "| //t[@t-call-assets]"
for child in node.xpath(xpath):
try:
called_view = self._view_obj(cr, uid, child.get('t-call', child.get('t-call-assets')), context=context)
except ValueError:
continue
if called_view not in result:
result += self._views_get(cr, uid, called_view, options=options, bundles=bundles, context=context)
extensions = view.inherit_children_ids
if not options:
# only active children
extensions = (v for v in view.inherit_children_ids if v.active)
# Keep options in a deterministic order regardless of their applicability
for extension in sorted(extensions, key=lambda v: v.id):
for r in self._views_get(
cr, uid, extension,
# only return optional grandchildren if this child is enabled
options=extension.active,
context=context, root=False):
if r not in result:
result.append(r)
return result
def extract_embedded_fields(self, cr, uid, arch, context=None):
return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')
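    # Note (derived from the code above and from save_embedded_field below):
    # the XPath selects every element carrying a data-oe-model attribute other
    # than "ir.ui.view", i.e. record fields embedded in the page that are
    # written back to their own model by save_embedded_field().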
def save_embedded_field(self, cr, uid, el, context=None):
Model = self.pool[el.get('data-oe-model')]
field = el.get('data-oe-field')
column = Model._all_columns[field].column
converter = self.pool['website.qweb'].get_converter_for(
el.get('data-oe-type'))
value = converter.from_html(cr, uid, Model, column, el)
if value is not None:
# TODO: batch writes?
Model.write(cr, uid, [int(el.get('data-oe-id'))], {
field: value
}, context=context)
def to_field_ref(self, cr, uid, el, context=None):
# filter out meta-information inserted in the document
attributes = dict((k, v) for k, v in el.items()
if not k.startswith('data-oe-'))
attributes['t-field'] = el.get('data-oe-expression')
out = html.html_parser.makeelement(el.tag, attrib=attributes)
out.tail = el.tail
return out
def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
# the root of the arch section shouldn't actually be replaced as it's
# not really editable itself, only the content truly is editable.
[view] = self.browse(cr, uid, [view_id], context=context)
arch = etree.fromstring(view.arch.encode('utf-8'))
# => get the replacement root
if not section_xpath:
root = arch
else:
# ensure there's only one match
[root] = arch.xpath(section_xpath)
root.text = replacement.text
root.tail = replacement.tail
# replace all children
del root[:]
for child in replacement:
root.append(copy.deepcopy(child))
return arch
@tools.ormcache_context(accepted_keys=('website_id',))
def get_view_id(self, cr, uid, xml_id, context=None):
if context and 'website_id' in context and not isinstance(xml_id, (int, long)):
domain = [('key', '=', xml_id), '|', ('website_id', '=', context['website_id']), ('website_id', '=', False)]
[xml_id] = self.search(cr, uid, domain, order='website_id', limit=1, context=context)
else:
xml_id = super(view, self).get_view_id(cr, uid, xml_id, context=context)
return xml_id
def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
if request and getattr(request, 'website_enabled', False):
engine='website.qweb'
if isinstance(id_or_xml_id, list):
id_or_xml_id = id_or_xml_id[0]
if not context:
context = {}
company = self.pool['res.company'].browse(cr, SUPERUSER_ID, request.website.company_id.id, context=context)
qcontext = dict(
context.copy(),
website=request.website,
url_for=website.url_for,
slug=website.slug,
res_company=company,
user_id=self.pool.get("res.users").browse(cr, uid, uid),
translatable=context.get('lang') != request.website.default_lang_code,
editable=request.website.is_publisher(),
menu_data=self.pool['ir.ui.menu'].load_menus_root(cr, uid, context=context) if request.website.is_user() else None,
)
# add some values
if values:
qcontext.update(values)
# in edit mode ir.ui.view will tag nodes
context = dict(context, inherit_branding=qcontext.get('editable', False))
view_obj = request.website.get_template(id_or_xml_id)
if 'main_object' not in qcontext:
qcontext['main_object'] = view_obj
values = qcontext
return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)
def _pretty_arch(self, arch):
        # remove_blank_text does not seem to work on HTMLParser, and
# pretty-printing with lxml more or less requires stripping
# whitespace: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
# so serialize to XML, parse as XML (remove whitespace) then serialize
# as XML (pretty print)
arch_no_whitespace = etree.fromstring(
etree.tostring(arch, encoding='utf-8'),
parser=etree.XMLParser(encoding='utf-8', remove_blank_text=True))
return etree.tostring(
arch_no_whitespace, encoding='unicode', pretty_print=True)
def save(self, cr, uid, res_id, value, xpath=None, context=None):
""" Update a view section. The view section may embed fields to write
:param str model:
:param int res_id:
:param str xpath: valid xpath to the tag to replace
"""
res_id = int(res_id)
arch_section = html.fromstring(
value, parser=html.HTMLParser(encoding='utf-8'))
if xpath is None:
# value is an embedded field on its own, not a view section
self.save_embedded_field(cr, uid, arch_section, context=context)
return
for el in self.extract_embedded_fields(cr, uid, arch_section, context=context):
self.save_embedded_field(cr, uid, el, context=context)
# transform embedded field back to t-field
el.getparent().replace(el, self.to_field_ref(cr, uid, el, context=context))
arch = self.replace_arch_section(cr, uid, res_id, xpath, arch_section, context=context)
self.write(cr, uid, res_id, {
'arch': self._pretty_arch(arch)
}, context=context)
view = self.browse(cr, SUPERUSER_ID, res_id, context=context)
if view.model_data_id:
view.model_data_id.write({'noupdate': True})
|
hexinatgithub/CLRS-1
|
refs/heads/master
|
C31-Number-Theoretic-Algorithms/euclid.py
|
9
|
#!/usr/bin/env python
# coding=utf-8
def gcd(a, b):
while b != 0:
tmp = b
b = a % b
a = tmp
return a
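# Worked example (added for illustration): gcd(69, 99) walks through the
# (a, b) pairs (69, 99) -> (99, 69) -> (69, 30) -> (30, 9) -> (9, 3) -> (3, 0)
# and returns 3.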
print gcd(69,99)
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Doc/includes/sqlite3/row_factory.py
|
44
|
import sqlite3
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
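# Illustration (not part of the original example): for the query below,
# dict_factory turns the row (1,) into {"a": 1}, so fetchone()["a"] prints 1.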
con = sqlite3.connect(":memory:")
con.row_factory = dict_factory
cur = con.cursor()
cur.execute("select 1 as a")
print cur.fetchone()["a"]
|
char-lie/data_mining
|
refs/heads/master
|
lab3/counter.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sys import stdin, argv
from os import linesep
from math import log
from stoplist import stop_list
def get_count(words):
tfs = {}
for key in set(words):
tfs[key] = 0
for w in words:
tfs[w] += 1
return tfs
def group_n_grams(words, n):
if n < 2:
return [w for w in words if w not in stop_list]
return [' '.join(w for w in words[i:i+n])
for i in range(len(words)-n)
if words[i+n-1] not in stop_list
and words[i] not in stop_list]
if __name__ == '__main__':
n_grams_length = 1
if len(argv) > 1:
n_grams_length = int(argv[1])
texts = ([l.strip().split(' ') for l in stdin])
names = map(lambda text: text[0], texts)
texts = map(lambda text: group_n_grams(text[1:],
n_grams_length), texts)
tfs = map(get_count, texts)
idf = {}
for word in set(sum(texts, [])):
idf[word] = 0
for tf in tfs:
for word in tf:
idf[word] += 1
logN = log(len(texts))
for word in idf:
idf[word] = logN - log(idf[word])
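    # Illustrative note: this is the usual idf = log(N / df) formulation;
    # e.g. with 4 input texts and a word occurring in 2 of them,
    # idf = log(4) - log(2) = log(2) ~= 0.693.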
tf_idfs = []
for i, tf in enumerate(tfs):
tf_idfs.append({})
for word in tf:
tf_idfs[i][word] = tf[word] * idf[word] / len(tf)
result = [(names[i], word, tf_idf[word])
for i, tf_idf in enumerate(tf_idfs)
for word in tf_idf]
result = sorted(result, key=lambda x: x[2], reverse=True)
print linesep.join('%s,%s,%f'%(r) for r in result)
|
frontibit/riestercoin
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
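# Illustration (not part of the original script): frame_to_filename(0)
# resolves to '/tmp/tmp-000.png' with the TMPDIR/TMPNAME settings above.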
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
Ghalko/osf.io
|
refs/heads/develop
|
api_tests/applications/views/test_application_reset.py
|
26
|
import mock
from nose.tools import * # flake8: noqa
from website.models import ApiOAuth2Application, User
from website.util import api_v2_url
from tests.base import ApiTestCase
from tests.factories import ApiOAuth2ApplicationFactory, AuthUserFactory
def _get_application_reset_route(app):
path = "applications/{}/reset/".format(app.client_id)
return api_v2_url(path, base_route='/')
class TestApplicationReset(ApiTestCase):
def setUp(self):
super(TestApplicationReset, self).setUp()
self.user1 = AuthUserFactory()
self.user1_app = ApiOAuth2ApplicationFactory(owner=self.user1)
self.user1_reset_url = _get_application_reset_route(self.user1_app)
self.correct = {
'data': {
'id': self.user1_app.client_id,
'type': 'applications',
'attributes': {
'name': 'A shiny new application',
'home_url': 'http://osf.io',
'callback_url': 'https://cos.io'
}
}
}
@mock.patch('framework.auth.cas.CasClient.revoke_application_tokens')
def test_reset_revokes_tokens_and_resets(self, mock_method):
mock_method.return_value(True)
old_secret = self.user1_app.client_secret
self.user1_app.reset_secret(save=True)
mock_method.assert_called()
self.user1_app.reload()
assert_not_equal(old_secret, self.user1_app.client_secret)
@mock.patch('framework.auth.cas.CasClient.revoke_application_tokens')
def test_reset_does_not_save_without_save_param(self, mock_method):
mock_method.return_value(True)
old_secret = self.user1_app.client_secret
self.user1_app.reset_secret()
self.user1_app.reload()
assert_equal(old_secret, self.user1_app.client_secret)
@mock.patch('framework.auth.cas.CasClient.revoke_application_tokens')
def test_reset_url_revokes_tokens_and_resets(self, mock_method):
mock_method.return_value(True)
old_secret = self.user1_app.client_secret
res = self.app.post_json_api(self.user1_reset_url, self.correct, auth=self.user1.auth)
assert_equal(res.status_code, 201)
mock_method.assert_called()
self.user1_app.reload()
assert_not_equal(old_secret, self.user1_app.client_secret)
@mock.patch('website.oauth.models.ApiOAuth2Application.reset_secret')
def test_other_user_cannot_reset(self, mock_method):
mock_method.return_value(True)
old_secret = self.user1_app.client_secret
self.user2 = AuthUserFactory()
res = self.app.post_json_api(self.user1_reset_url, self.correct, auth=self.user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
mock_method.assert_not_called()
self.user1_app.reload()
assert_equal(old_secret, self.user1_app.client_secret)
@mock.patch('website.oauth.models.ApiOAuth2Application.reset_secret')
def test_unauth_user_cannot_reset(self, mock_method):
mock_method.return_value(True)
old_secret = self.user1_app.client_secret
res = self.app.post_json_api(self.user1_reset_url, self.correct, auth=None, expect_errors=True)
assert_equal(res.status_code, 401)
mock_method.assert_not_called()
self.user1_app.reload()
assert_equal(old_secret, self.user1_app.client_secret)
def tearDown(self):
super(TestApplicationReset, self).tearDown()
ApiOAuth2Application.remove()
User.remove()
|
Dallinger/Dallinger
|
refs/heads/master
|
demos/dlgr/demos/bartlett1932/models.py
|
1
|
from dallinger.nodes import Source
import random
class WarOfTheGhostsSource(Source):
"""A Source that reads in a random story from a file and transmits it."""
__mapper_args__ = {"polymorphic_identity": "war_of_the_ghosts_source"}
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md",
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read()
|
cwahbong/dargparse
|
refs/heads/master
|
dargparse/tests/simple_dargparse_test.py
|
2
|
__author__ = 'abdul'
import dargparse
import datetime
import unittest
from dargparse import dargparse
from datetime import datetime
from unittest import TestCase
###############################################################################
# Constants
###############################################################################
HELLO_PARSER ={
"prog": "hello-dargparse",
"description" : "This is hello world dargparse example",
"args": [
{
"name": "yourName",
"type" : "positional",
"help": "Your name",
"metavar" : "<YOUR NAME>",
"nargs": 1
},
{
"name": "printDate",
"type" : "optional",
"help": "print the current date",
"cmd_arg": [
"-d",
"--printDate"
],
"nargs": 0,
"action": "store_true",
"default": False
}
]
}
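# Illustrative note (not part of the original test): with the spec above,
# a command line such as `hello-dargparse --printDate Abdulito` parses the
# positional name and sets printDate to True (see test_simple below).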
helloDargParser = None
###############################################################################
# SimpleDargparseTest class
###############################################################################
class SimpleDargparseTest(unittest.TestCase):
###########################################################################
def setUp(self):
global helloDargParser
helloDargParser = dargparse.build_parser(HELLO_PARSER)
###########################################################################
def tearDown(self):
pass
###########################################################################
def test_simple(self):
# parse the command line
args = ["--printDate" , "Abdulito"]
global helloDargParser
parsed_options = helloDargParser.parse_args(args)
now = datetime.now()
if parsed_options.printDate:
print "Hello %s! the time now is %s" % (parsed_options.yourName,now)
else:
print "Hello %s!" % (parsed_options.yourName)
|
gwillen/elements
|
refs/heads/alpha
|
qa/rpc-tests/walletbackup.py
|
131
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. Nodes 1, 2 and 3 send transactions between each other;
the fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import BitcoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(BitcoinTestFramework):
def setup_chain(self):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
        # nodes 1, 2 and 3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].setgenerate(True, 1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[2].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[3].setgenerate(True, 100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].setgenerate(True, 101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
BehavioralInsightsTeam/edx-platform
|
refs/heads/release-bit
|
pavelib/paver_tests/test_js_test.py
|
9
|
"""Unit tests for the Paver JavaScript testing tasks."""
import ddt
from mock import patch
from paver.easy import call_task
import pavelib.js_test
from pavelib.utils.envs import Env
from .utils import PaverTestCase
@ddt.ddt
class TestPaverJavaScriptTestTasks(PaverTestCase):
"""
Test the Paver JavaScript testing tasks.
"""
EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = u'find {platform_root}/reports/javascript -type f -delete'
EXPECTED_INSTALL_NPM_ASSETS_COMMAND = u'install npm_assets'
EXPECTED_KARMA_OPTIONS = (
u"{config_file} "
u"--single-run={single_run} "
u"--capture-timeout=60000 "
u"--junitreportpath="
u"{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
u"--browsers={browser}"
)
EXPECTED_COVERAGE_OPTIONS = (
u' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
)
EXPECTED_COMMANDS = [
u"make report_dir",
u'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
u"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
u'rm -rf test_root/log/auto_screenshots/*',
u"rm -rf /tmp/mako_[cl]ms",
]
def setUp(self):
super(TestPaverJavaScriptTestTasks, self).setUp()
# Mock the paver @needs decorator
self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start()
self._mock_paver_needs.return_value = 0
# Cleanup mocks
self.addCleanup(self._mock_paver_needs.stop)
@ddt.data(
[""],
["--coverage"],
["--suite=lms"],
["--suite=lms --coverage"],
)
@ddt.unpack
def test_test_js_run(self, options_string):
"""
Test the "test_js_run" task.
"""
options = self.parse_options_string(options_string)
self.reset_task_messages()
call_task("pavelib.js_test.test_js_run", options=options)
self.verify_messages(options=options, dev_mode=False)
@ddt.data(
[""],
["--port=9999"],
["--suite=lms"],
["--suite=lms --port=9999"],
)
@ddt.unpack
def test_test_js_dev(self, options_string):
"""
Test the "test_js_run" task.
"""
options = self.parse_options_string(options_string)
self.reset_task_messages()
call_task("pavelib.js_test.test_js_dev", options=options)
self.verify_messages(options=options, dev_mode=True)
def parse_options_string(self, options_string):
"""
Parse a string containing the options for a test run
"""
parameters = options_string.split(" ")
suite = "all"
if "--system=lms" in parameters:
suite = "lms"
elif "--system=common" in parameters:
suite = "common"
coverage = "--coverage" in parameters
port = None
if "--port=9999" in parameters:
port = 9999
return {
"suite": suite,
"coverage": coverage,
"port": port,
}
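        # Illustrative results, as computed by the code above (not part of the
        # original test):
        #   parse_options_string("--coverage")   -> {"suite": "all", "coverage": True, "port": None}
        #   parse_options_string("--system=lms") -> {"suite": "lms", "coverage": False, "port": None}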
def verify_messages(self, options, dev_mode):
"""
Verify that the messages generated when running tests are as expected
for the specified options and dev_mode.
"""
is_coverage = options['coverage']
port = options['port']
expected_messages = []
suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']]
expected_messages.extend(self.EXPECTED_COMMANDS)
if not dev_mode and not is_coverage:
expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format(
platform_root=self.platform_root
))
expected_messages.append(self.EXPECTED_INSTALL_NPM_ASSETS_COMMAND)
for suite in suites:
# Karma test command
karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)]
command_template = u'nodejs --max_old_space_size=4096 node_modules/.bin/karma start {options}'
expected_test_tool_command = command_template.format(
options=self.EXPECTED_KARMA_OPTIONS.format(
config_file=karma_config_file,
single_run='false' if dev_mode else 'true',
suite=suite,
platform_root=self.platform_root,
browser=Env.KARMA_BROWSER,
),
)
if is_coverage:
expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format(
platform_root=self.platform_root,
suite=suite
)
if port:
expected_test_tool_command += u" --port={port}".format(port=port)
expected_messages.append(expected_test_tool_command)
self.assertEquals(self.task_messages, expected_messages)
|
happylyang/django-adminplus
|
refs/heads/master
|
adminplus/tests.py
|
2
|
from django.template.loader import render_to_string
from django.test import TestCase
from django.views.generic import View
from adminplus.sites import AdminSitePlus
class AdminPlusTests(TestCase):
def test_decorator(self):
"""register_view works as a decorator."""
site = AdminSitePlus()
@site.register_view(r'foo/bar')
def foo_bar(request):
return 'foo-bar'
@site.register_view(r'foobar')
class FooBar(View):
def get(self, request):
return 'foo-bar'
urls = site.get_urls()
assert any(u.resolve('foo/bar') for u in urls)
assert any(u.resolve('foobar') for u in urls)
def test_function(self):
"""register_view works as a function."""
site = AdminSitePlus()
def foo(request):
return 'foo'
site.register_view('foo', view=foo)
class Foo(View):
def get(self, request):
return 'foo'
site.register_view('bar', view=Foo)
urls = site.get_urls()
assert any(u.resolve('foo') for u in urls)
assert any(u.resolve('bar') for u in urls)
def test_path(self):
"""Setting the path works correctly."""
site = AdminSitePlus()
def foo(request):
return 'foo'
site.register_view('foo', view=foo)
site.register_view('bar/baz', view=foo)
site.register_view('baz-qux', view=foo)
urls = site.get_urls()
foo_urls = [u for u in urls if u.resolve('foo')]
self.assertEqual(1, len(foo_urls))
bar_urls = [u for u in urls if u.resolve('bar/baz')]
self.assertEqual(1, len(bar_urls))
qux_urls = [u for u in urls if u.resolve('baz-qux')]
self.assertEqual(1, len(qux_urls))
def test_urlname(self):
"""Set URL pattern names correctly."""
site = AdminSitePlus()
@site.register_view('foo', urlname='foo')
def foo(request):
return 'foo'
@site.register_view('bar')
def bar(request):
return 'bar'
urls = site.get_urls()
foo_urls = [u for u in urls if u.resolve('foo')]
self.assertEqual(1, len(foo_urls))
self.assertEqual('foo', foo_urls[0].name)
bar_urls = [u for u in urls if u.resolve('bar')]
self.assertEqual(1, len(bar_urls))
assert bar_urls[0].name is None
def test_base_template(self):
"""Make sure extending the base template works everywhere."""
result = render_to_string('adminplus/test/index.html')
assert 'Ohai' in result
|
xkmato/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_chatstate/test_layer.py
|
68
|
from yowsup.layers import YowProtocolLayerTest
from yowsup.layers.protocol_chatstate import YowChatstateProtocolLayer
from yowsup.layers.protocol_chatstate.protocolentities import IncomingChatstateProtocolEntity, OutgoingChatstateProtocolEntity
class YowChatStateProtocolLayerTest(YowProtocolLayerTest, YowChatstateProtocolLayer):
def setUp(self):
YowChatstateProtocolLayer.__init__(self)
def test_send(self):
entity = OutgoingChatstateProtocolEntity(OutgoingChatstateProtocolEntity.STATE_PAUSED, "jid@s.whatsapp.net")
self.assertSent(entity)
def test_receive(self):
entity = IncomingChatstateProtocolEntity(IncomingChatstateProtocolEntity.STATE_TYPING, "jid@s.whatsapp.net")
self.assertReceived(entity)
|
kbsezginel/raspberry-pi
|
refs/heads/master
|
FlaskApp/usa_weather.py
|
1
|
import json
import requests
import time
def usa_weather(city='pittsburgh', state='pa', unit='C', precision=1):
"""
Get today's forecast (high, low, condition) for a given city and state using the Yahoo public weather API.
"""
# Change to your location
url = requests.get('https://query.yahooapis.com/v1/public/yql?q=select item.forecast from weather.forecast where woeid in (select woeid from geo.places(1) where text="%s, %s")&format=json' % (city, state))
weather = json.loads(url.text)
results = {}
# Get today's high and low
results['high'] = weather['query']['results']['channel'][0]['item']['forecast']['high']
results['low'] = weather['query']['results']['channel'][0]['item']['forecast']['low']
results['code'] = weather['query']['results']['channel'][0]['item']['forecast']['code']
results['text'] = weather['query']['results']['channel'][0]['item']['forecast']['text']
if unit == 'C':
results['high'] = round((int(results['high']) - 32) * 5 / 9, precision)
results['low'] = round((int(results['low']) - 32) * 5 / 9, precision)
return results
if __name__ == '__main__':
weather = usa_weather(city='pittsburgh', state='pa', unit='C')
print(weather['text'])
print('High: %.1f' % weather['high'])
print('Low: %.1f' % weather['low'])
"""
Yahoo Weather Codes
0 tornado
1 tropical storm
2 hurricane
3 severe thunderstorms
4 thunderstorms
5 mixed rain and snow
6 mixed rain and sleet
7 mixed snow and sleet
8 freezing drizzle
9 drizzle
10 freezing rain
11 showers
12 showers
13 snow flurries
14 light snow showers
15 blowing snow
16 snow
17 hail
18 sleet
19 dust
20 foggy
21 haze
22 smoky
23 blustery
24 windy
25 cold
26 cloudy
27 mostly cloudy (night)
28 mostly cloudy (day)
29 partly cloudy (night)
30 partly cloudy (day)
31 clear (night)
32 sunny
33 fair (night)
34 fair (day)
35 mixed rain and hail
36 hot
37 isolated thunderstorms
38 scattered thunderstorms
39 scattered thunderstorms
40 scattered showers
41 heavy snow
42 scattered snow showers
43 heavy snow
44 partly cloudy
45 thundershowers
46 snow showers
47 isolated thundershowers
3200 not available
"""
|
arriam-lab2/amquery
|
refs/heads/develop
|
amquery/cli.py
|
1
|
"""
Command line interface module
"""
import click
import amquery.api as api
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group()
@click.option('--jobs', '-j', type=int, default=1, help='Number of jobs to start in parallel')
def cli(jobs):
"""
Amquery
"""
api.default_setup(jobs)
@cli.command()
@click.option("--edit", is_flag=True)
def config(edit):
"""
Print or edit a config file
"""
if edit:
api.edit_config()
else:
api.read_config()
@cli.group()
def db():
"""
Database-specific commands
"""
pass
@db.command()
def list():
"""
List registered databases
"""
api.list_databases()
def _validate_distance(ctx, param, value):
from amquery.core.distance import distances, DEFAULT_DISTANCE
if value:
try:
distances[value]
return value
except KeyError:
raise click.BadParameter('Distance must be one of {}'.format(", ".join(s for s in distances.keys())))
else:
return DEFAULT_DISTANCE
@db.command()
@click.argument('name', type=str, required=True)
@click.option("--distance", callback=_validate_distance, default="")
@click.option("--rep_tree", type=click.Path())
@click.option("--rep_set", type=click.Path())
@click.option("--biom_table", type=click.Path())
@click.option("--kmer_size", "-k", type=int, default=15)
def create(name, distance, rep_tree, rep_set, biom_table, kmer_size):
"""
Create a new database
"""
api.create_database(name,
distance=distance, rep_tree=rep_tree, rep_set=rep_set,
biom_table=biom_table, kmer_size=kmer_size)
@cli.command()
@click.option('--db', type=str, required=False)
@click.argument('input_files', type=click.Path(exists=True), nargs=-1, required=True)
def build(db, input_files):
"""
Build databases by indexing samples
"""
api.build_databases(input_files, db=db)
@cli.command()
@click.option('--db', type=str, required=False)
@click.argument('input_files', type=click.Path(exists=True), nargs=-1, required=True)
@click.option("--biom_table", type=click.Path())
def add(db, input_files, biom_table):
"""
Add samples to databases
"""
api.add_samples(input_files, db=db)
@cli.command()
@click.argument('database_name', type=str, required=True)
def stats(database_name):
"""
Show general information about a database
"""
api.stats(database_name)
@cli.command()
@click.argument('database_name', type=str, required=True)
def list(database_name):
"""
Show a list of indexed samples
"""
samples = api.get_samples(database_name)
api.print_samples(samples)
@cli.command()
@click.option('--db', type=str, required=False)
@click.argument('sample_name', type=str, required=True)
@click.option('-k', type=int, required=True, default=5, help='Count of nearest neighbors')
def find(db, sample_name, k):
"""
Nearest neighbors search against databases
"""
results = api.search(sample_name, k, db=db)
api.print_search_results(results, k, db)
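# Illustrative command-line usage. The executable name "amquery" is an
# assumption (it depends on how the entry point is installed); the
# sub-commands and options mirror the click definitions above:
#
#     amquery db create mydb --kmer_size 15
#     amquery build samples/*.fasta --db mydb
#     amquery find sample_01 -k 5 --db mydb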
|
markjin1990/solr
|
refs/heads/master
|
lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/htmlentity.py
|
7
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# A simple python script to generate an HTML entity map and a regex alternation
# for inclusion in HTMLStripCharFilter.jflex.
def main():
print get_apache_license()
codes = {}
regex = re.compile(r'\s*<!ENTITY\s+(\S+)\s+"&(?:#38;)?#(\d+);"')
for line in get_entity_text().split('\n'):
match = regex.match(line)
if match:
key = match.group(1)
if key == 'quot': codes[key] = r'\"'
elif key == 'nbsp': codes[key] = ' ';
else : codes[key] = r'\u%04X' % int(match.group(2))
keys = sorted(codes)
first_entry = True
output_line = 'CharacterEntities = ( '
for key in keys:
new_entry = ('"%s"' if first_entry else ' | "%s"') % key
first_entry = False
if len(output_line) + len(new_entry) >= 80:
print output_line
output_line = ' '
output_line += new_entry
if key in ('quot','copy','gt','lt','reg','amp'):
new_entry = ' | "%s"' % key.upper()
if len(output_line) + len(new_entry) >= 80:
print output_line
output_line = ' '
output_line += new_entry
print output_line, ')'
print '%{'
print ' private static final Map<String,String> upperCaseVariantsAccepted'
print ' = new HashMap<>();'
print ' static {'
print ' upperCaseVariantsAccepted.put("quot", "QUOT");'
print ' upperCaseVariantsAccepted.put("copy", "COPY");'
print ' upperCaseVariantsAccepted.put("gt", "GT");'
print ' upperCaseVariantsAccepted.put("lt", "LT");'
print ' upperCaseVariantsAccepted.put("reg", "REG");'
print ' upperCaseVariantsAccepted.put("amp", "AMP");'
print ' }'
print ' private static final CharArrayMap<Character> entityValues'
print ' = new CharArrayMap<>(%i, false);' % len(keys)
print ' static {'
print ' String[] entities = {'
output_line = ' '
for key in keys:
new_entry = ' "%s", "%s",' % (key, codes[key])
if len(output_line) + len(new_entry) >= 80:
print output_line
output_line = ' '
output_line += new_entry
print output_line[:-1]
print ' };'
print ' for (int i = 0 ; i < entities.length ; i += 2) {'
print ' Character value = entities[i + 1].charAt(0);'
print ' entityValues.put(entities[i], value);'
print ' String upperCaseVariant = upperCaseVariantsAccepted.get(entities[i]);'
print ' if (upperCaseVariant != null) {'
print ' entityValues.put(upperCaseVariant, value);'
print ' }'
print ' }'
print " }"
print "%}"
def get_entity_text():
# The text below is taken verbatim from
# <http://www.w3.org/TR/REC-html40/sgml/entities.html>:
text = r"""
F.1. XHTML Character Entities
XHTML DTDs make available a standard collection of named character entities. Those entities are defined in this section.
F.1.1. XHTML Latin 1 Character Entities
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-lat1.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-lat1.ent.
<!-- ...................................................................... -->
<!-- XML-compatible ISO Latin 1 Character Entity Set for XHTML ............ -->
<!-- file: xhtml-lat1.ent
Typical invocation:
<!ENTITY % xhtml-lat1
PUBLIC "-//W3C//ENTITIES Latin 1 for XHTML//EN"
"xhtml-lat1.ent" >
%xhtml-lat1;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Latin 1 for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-lat1.ent"
Revision: Id: xhtml-lat1.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
-->
<!ENTITY nbsp " " ><!-- no-break space = non-breaking space, U+00A0 ISOnum -->
<!ENTITY iexcl "¡" ><!-- inverted exclamation mark, U+00A1 ISOnum -->
<!ENTITY cent "¢" ><!-- cent sign, U+00A2 ISOnum -->
<!ENTITY pound "£" ><!-- pound sign, U+00A3 ISOnum -->
<!ENTITY curren "¤" ><!-- currency sign, U+00A4 ISOnum -->
<!ENTITY yen "¥" ><!-- yen sign = yuan sign, U+00A5 ISOnum -->
<!ENTITY brvbar "¦" ><!-- broken bar = broken vertical bar, U+00A6 ISOnum -->
<!ENTITY sect "§" ><!-- section sign, U+00A7 ISOnum -->
<!ENTITY uml "¨" ><!-- diaeresis = spacing diaeresis, U+00A8 ISOdia -->
<!ENTITY copy "©" ><!-- copyright sign, U+00A9 ISOnum -->
<!ENTITY ordf "ª" ><!-- feminine ordinal indicator, U+00AA ISOnum -->
<!ENTITY laquo "«" ><!-- left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum -->
<!ENTITY not "¬" ><!-- not sign, U+00AC ISOnum -->
<!ENTITY shy "­" ><!-- soft hyphen = discretionary hyphen, U+00AD ISOnum -->
<!ENTITY reg "®" ><!-- registered sign = registered trade mark sign, U+00AE ISOnum -->
<!ENTITY macr "¯" ><!-- macron = spacing macron = overline = APL overbar, U+00AF ISOdia -->
<!ENTITY deg "°" ><!-- degree sign, U+00B0 ISOnum -->
<!ENTITY plusmn "±" ><!-- plus-minus sign = plus-or-minus sign, U+00B1 ISOnum -->
<!ENTITY sup2 "²" ><!-- superscript two = superscript digit two = squared, U+00B2 ISOnum -->
<!ENTITY sup3 "³" ><!-- superscript three = superscript digit three = cubed, U+00B3 ISOnum -->
<!ENTITY acute "´" ><!-- acute accent = spacing acute, U+00B4 ISOdia -->
<!ENTITY micro "µ" ><!-- micro sign, U+00B5 ISOnum -->
<!ENTITY para "¶" ><!-- pilcrow sign = paragraph sign, U+00B6 ISOnum -->
<!ENTITY middot "·" ><!-- middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum -->
<!ENTITY cedil "¸" ><!-- cedilla = spacing cedilla, U+00B8 ISOdia -->
<!ENTITY sup1 "¹" ><!-- superscript one = superscript digit one, U+00B9 ISOnum -->
<!ENTITY ordm "º" ><!-- masculine ordinal indicator, U+00BA ISOnum -->
<!ENTITY raquo "»" ><!-- right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum -->
<!ENTITY frac14 "¼" ><!-- vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum -->
<!ENTITY frac12 "½" ><!-- vulgar fraction one half = fraction one half, U+00BD ISOnum -->
<!ENTITY frac34 "¾" ><!-- vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum -->
<!ENTITY iquest "¿" ><!-- inverted question mark = turned question mark, U+00BF ISOnum -->
<!ENTITY Agrave "À" ><!-- latin capital A with grave = latin capital A grave, U+00C0 ISOlat1 -->
<!ENTITY Aacute "Á" ><!-- latin capital A with acute, U+00C1 ISOlat1 -->
<!ENTITY Acirc "Â" ><!-- latin capital A with circumflex, U+00C2 ISOlat1 -->
<!ENTITY Atilde "Ã" ><!-- latin capital A with tilde, U+00C3 ISOlat1 -->
<!ENTITY Auml "Ä" ><!-- latin capital A with diaeresis, U+00C4 ISOlat1 -->
<!ENTITY Aring "Å" ><!-- latin capital A with ring above = latin capital A ring, U+00C5 ISOlat1 -->
<!ENTITY AElig "Æ" ><!-- latin capital AE = latin capital ligature AE, U+00C6 ISOlat1 -->
<!ENTITY Ccedil "Ç" ><!-- latin capital C with cedilla, U+00C7 ISOlat1 -->
<!ENTITY Egrave "È" ><!-- latin capital E with grave, U+00C8 ISOlat1 -->
<!ENTITY Eacute "É" ><!-- latin capital E with acute, U+00C9 ISOlat1 -->
<!ENTITY Ecirc "Ê" ><!-- latin capital E with circumflex, U+00CA ISOlat1 -->
<!ENTITY Euml "Ë" ><!-- latin capital E with diaeresis, U+00CB ISOlat1 -->
<!ENTITY Igrave "Ì" ><!-- latin capital I with grave, U+00CC ISOlat1 -->
<!ENTITY Iacute "Í" ><!-- latin capital I with acute, U+00CD ISOlat1 -->
<!ENTITY Icirc "Î" ><!-- latin capital I with circumflex, U+00CE ISOlat1 -->
<!ENTITY Iuml "Ï" ><!-- latin capital I with diaeresis, U+00CF ISOlat1 -->
<!ENTITY ETH "Ð" ><!-- latin capital ETH, U+00D0 ISOlat1 -->
<!ENTITY Ntilde "Ñ" ><!-- latin capital N with tilde, U+00D1 ISOlat1 -->
<!ENTITY Ograve "Ò" ><!-- latin capital O with grave, U+00D2 ISOlat1 -->
<!ENTITY Oacute "Ó" ><!-- latin capital O with acute, U+00D3 ISOlat1 -->
<!ENTITY Ocirc "Ô" ><!-- latin capital O with circumflex, U+00D4 ISOlat1 -->
<!ENTITY Otilde "Õ" ><!-- latin capital O with tilde, U+00D5 ISOlat1 -->
<!ENTITY Ouml "Ö" ><!-- latin capital O with diaeresis, U+00D6 ISOlat1 -->
<!ENTITY times "×" ><!-- multiplication sign, U+00D7 ISOnum -->
<!ENTITY Oslash "Ø" ><!-- latin capital O with stroke = latin capital O slash, U+00D8 ISOlat1 -->
<!ENTITY Ugrave "Ù" ><!-- latin capital U with grave, U+00D9 ISOlat1 -->
<!ENTITY Uacute "Ú" ><!-- latin capital U with acute, U+00DA ISOlat1 -->
<!ENTITY Ucirc "Û" ><!-- latin capital U with circumflex, U+00DB ISOlat1 -->
<!ENTITY Uuml "Ü" ><!-- latin capital U with diaeresis, U+00DC ISOlat1 -->
<!ENTITY Yacute "Ý" ><!-- latin capital Y with acute, U+00DD ISOlat1 -->
<!ENTITY THORN "Þ" ><!-- latin capital THORN, U+00DE ISOlat1 -->
<!ENTITY szlig "ß" ><!-- latin small sharp s = ess-zed, U+00DF ISOlat1 -->
<!ENTITY agrave "à" ><!-- latin small a with grave = latin small a grave, U+00E0 ISOlat1 -->
<!ENTITY aacute "á" ><!-- latin small a with acute, U+00E1 ISOlat1 -->
<!ENTITY acirc "â" ><!-- latin small a with circumflex, U+00E2 ISOlat1 -->
<!ENTITY atilde "ã" ><!-- latin small a with tilde, U+00E3 ISOlat1 -->
<!ENTITY auml "ä" ><!-- latin small a with diaeresis, U+00E4 ISOlat1 -->
<!ENTITY aring "å" ><!-- latin small a with ring above = latin small a ring, U+00E5 ISOlat1 -->
<!ENTITY aelig "æ" ><!-- latin small ae = latin small ligature ae, U+00E6 ISOlat1 -->
<!ENTITY ccedil "ç" ><!-- latin small c with cedilla, U+00E7 ISOlat1 -->
<!ENTITY egrave "è" ><!-- latin small e with grave, U+00E8 ISOlat1 -->
<!ENTITY eacute "é" ><!-- latin small e with acute, U+00E9 ISOlat1 -->
<!ENTITY ecirc "ê" ><!-- latin small e with circumflex, U+00EA ISOlat1 -->
<!ENTITY euml "ë" ><!-- latin small e with diaeresis, U+00EB ISOlat1 -->
<!ENTITY igrave "ì" ><!-- latin small i with grave, U+00EC ISOlat1 -->
<!ENTITY iacute "í" ><!-- latin small i with acute, U+00ED ISOlat1 -->
<!ENTITY icirc "î" ><!-- latin small i with circumflex, U+00EE ISOlat1 -->
<!ENTITY iuml "ï" ><!-- latin small i with diaeresis, U+00EF ISOlat1 -->
<!ENTITY eth "ð" ><!-- latin small eth, U+00F0 ISOlat1 -->
<!ENTITY ntilde "ñ" ><!-- latin small n with tilde, U+00F1 ISOlat1 -->
<!ENTITY ograve "ò" ><!-- latin small o with grave, U+00F2 ISOlat1 -->
<!ENTITY oacute "ó" ><!-- latin small o with acute, U+00F3 ISOlat1 -->
<!ENTITY ocirc "ô" ><!-- latin small o with circumflex, U+00F4 ISOlat1 -->
<!ENTITY otilde "õ" ><!-- latin small o with tilde, U+00F5 ISOlat1 -->
<!ENTITY ouml "ö" ><!-- latin small o with diaeresis, U+00F6 ISOlat1 -->
<!ENTITY divide "÷" ><!-- division sign, U+00F7 ISOnum -->
<!ENTITY oslash "ø" ><!-- latin small o with stroke, = latin small o slash, U+00F8 ISOlat1 -->
<!ENTITY ugrave "ù" ><!-- latin small u with grave, U+00F9 ISOlat1 -->
<!ENTITY uacute "ú" ><!-- latin small u with acute, U+00FA ISOlat1 -->
<!ENTITY ucirc "û" ><!-- latin small u with circumflex, U+00FB ISOlat1 -->
<!ENTITY uuml "ü" ><!-- latin small u with diaeresis, U+00FC ISOlat1 -->
<!ENTITY yacute "ý" ><!-- latin small y with acute, U+00FD ISOlat1 -->
<!ENTITY thorn "þ" ><!-- latin small thorn with, U+00FE ISOlat1 -->
<!ENTITY yuml "ÿ" ><!-- latin small y with diaeresis, U+00FF ISOlat1 -->
<!-- end of xhtml-lat1.ent -->
F.1.2. XHTML Special Characters
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-special.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-special.ent.
<!-- ...................................................................... -->
<!-- XML-compatible ISO Special Character Entity Set for XHTML ............ -->
<!-- file: xhtml-special.ent
Typical invocation:
<!ENTITY % xhtml-special
PUBLIC "-//W3C//ENTITIES Special for XHTML//EN"
"xhtml-special.ent" >
%xhtml-special;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Special for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-special.ent"
Revision: Id: xhtml-special.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
Revisions:
2000-10-28: added ' and altered XML Predefined Entities for compatibility
-->
<!-- Relevant ISO entity set is given unless names are newly introduced.
New names (i.e., not in ISO 8879 [SGML] list) do not clash with
any existing ISO 8879 entity names. ISO 10646 [ISO10646] character
numbers are given for each character, in hex. Entity values are
decimal conversions of the ISO 10646 values and refer to the
document character set. Names are Unicode [UNICODE] names.
-->
<!-- C0 Controls and Basic Latin -->
<!ENTITY lt "&#60;" ><!-- less-than sign, U+003C ISOnum -->
<!ENTITY gt ">" ><!-- greater-than sign, U+003E ISOnum -->
<!ENTITY amp "&#38;" ><!-- ampersand, U+0026 ISOnum -->
<!ENTITY apos "'" ><!-- The Apostrophe (Apostrophe Quote, APL Quote), U+0027 ISOnum -->
<!ENTITY quot """ ><!-- quotation mark (Quote Double), U+0022 ISOnum -->
<!-- Latin Extended-A -->
<!ENTITY OElig "Œ" ><!-- latin capital ligature OE, U+0152 ISOlat2 -->
<!ENTITY oelig "œ" ><!-- latin small ligature oe, U+0153 ISOlat2 -->
<!-- ligature is a misnomer, this is a separate character in some languages -->
<!ENTITY Scaron "Š" ><!-- latin capital letter S with caron, U+0160 ISOlat2 -->
<!ENTITY scaron "š" ><!-- latin small letter s with caron, U+0161 ISOlat2 -->
<!ENTITY Yuml "Ÿ" ><!-- latin capital letter Y with diaeresis, U+0178 ISOlat2 -->
<!-- Spacing Modifier Letters -->
<!ENTITY circ "ˆ" ><!-- modifier letter circumflex accent, U+02C6 ISOpub -->
<!ENTITY tilde "˜" ><!-- small tilde, U+02DC ISOdia -->
<!-- General Punctuation -->
<!ENTITY ensp " " ><!-- en space, U+2002 ISOpub -->
<!ENTITY emsp " " ><!-- em space, U+2003 ISOpub -->
<!ENTITY thinsp " " ><!-- thin space, U+2009 ISOpub -->
<!ENTITY zwnj "‌" ><!-- zero width non-joiner, U+200C NEW RFC 2070 -->
<!ENTITY zwj "‍" ><!-- zero width joiner, U+200D NEW RFC 2070 -->
<!ENTITY lrm "‎" ><!-- left-to-right mark, U+200E NEW RFC 2070 -->
<!ENTITY rlm "‏" ><!-- right-to-left mark, U+200F NEW RFC 2070 -->
<!ENTITY ndash "–" ><!-- en dash, U+2013 ISOpub -->
<!ENTITY mdash "—" ><!-- em dash, U+2014 ISOpub -->
<!ENTITY lsquo "‘" ><!-- left single quotation mark, U+2018 ISOnum -->
<!ENTITY rsquo "’" ><!-- right single quotation mark, U+2019 ISOnum -->
<!ENTITY sbquo "‚" ><!-- single low-9 quotation mark, U+201A NEW -->
<!ENTITY ldquo "“" ><!-- left double quotation mark, U+201C ISOnum -->
<!ENTITY rdquo "”" ><!-- right double quotation mark, U+201D ISOnum -->
<!ENTITY bdquo "„" ><!-- double low-9 quotation mark, U+201E NEW -->
<!ENTITY dagger "†" ><!-- dagger, U+2020 ISOpub -->
<!ENTITY Dagger "‡" ><!-- double dagger, U+2021 ISOpub -->
<!ENTITY permil "‰" ><!-- per mille sign, U+2030 ISOtech -->
<!-- lsaquo is proposed but not yet ISO standardized -->
<!ENTITY lsaquo "‹" ><!-- single left-pointing angle quotation mark, U+2039 ISO proposed -->
<!-- rsaquo is proposed but not yet ISO standardized -->
<!ENTITY rsaquo "›" ><!-- single right-pointing angle quotation mark, U+203A ISO proposed -->
<!ENTITY euro "€" ><!-- euro sign, U+20AC NEW -->
<!-- end of xhtml-special.ent -->
F.1.3. XHTML Mathematical, Greek, and Symbolic Characters
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-symbol.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-symbol.ent.
<!-- ...................................................................... -->
<!-- ISO Math, Greek and Symbolic Character Entity Set for XHTML .......... -->
<!-- file: xhtml-symbol.ent
Typical invocation:
<!ENTITY % xhtml-symbol
PUBLIC "-//W3C//ENTITIES Symbols for XHTML//EN"
"xhtml-symbol.ent" >
%xhtml-symbol;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Symbols for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-symbol.ent"
Revision: Id: xhtml-symbol.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
-->
<!-- Relevant ISO entity set is given unless names are newly introduced.
New names (i.e., not in ISO 8879 [SGML] list) do not clash with
any existing ISO 8879 entity names. ISO 10646 [ISO10646] character
numbers are given for each character, in hex. Entity values are
decimal conversions of the ISO 10646 values and refer to the
document character set. Names are Unicode [UNICODE] names.
-->
<!-- Latin Extended-B -->
<!ENTITY fnof "ƒ" ><!-- latin small f with hook = function
= florin, U+0192 ISOtech -->
<!-- Greek -->
<!ENTITY Alpha "Α" ><!-- greek capital letter alpha, U+0391 -->
<!ENTITY Beta "Β" ><!-- greek capital letter beta, U+0392 -->
<!ENTITY Gamma "Γ" ><!-- greek capital letter gamma, U+0393 ISOgrk3 -->
<!ENTITY Delta "Δ" ><!-- greek capital letter delta, U+0394 ISOgrk3 -->
<!ENTITY Epsilon "Ε" ><!-- greek capital letter epsilon, U+0395 -->
<!ENTITY Zeta "Ζ" ><!-- greek capital letter zeta, U+0396 -->
<!ENTITY Eta "Η" ><!-- greek capital letter eta, U+0397 -->
<!ENTITY Theta "Θ" ><!-- greek capital letter theta, U+0398 ISOgrk3 -->
<!ENTITY Iota "Ι" ><!-- greek capital letter iota, U+0399 -->
<!ENTITY Kappa "Κ" ><!-- greek capital letter kappa, U+039A -->
<!ENTITY Lambda "Λ" ><!-- greek capital letter lambda, U+039B ISOgrk3 -->
<!ENTITY Mu "Μ" ><!-- greek capital letter mu, U+039C -->
<!ENTITY Nu "Ν" ><!-- greek capital letter nu, U+039D -->
<!ENTITY Xi "Ξ" ><!-- greek capital letter xi, U+039E ISOgrk3 -->
<!ENTITY Omicron "Ο" ><!-- greek capital letter omicron, U+039F -->
<!ENTITY Pi "Π" ><!-- greek capital letter pi, U+03A0 ISOgrk3 -->
<!ENTITY Rho "Ρ" ><!-- greek capital letter rho, U+03A1 -->
<!-- there is no Sigmaf, and no U+03A2 character either -->
<!ENTITY Sigma "Σ" ><!-- greek capital letter sigma, U+03A3 ISOgrk3 -->
<!ENTITY Tau "Τ" ><!-- greek capital letter tau, U+03A4 -->
<!ENTITY Upsilon "Υ" ><!-- greek capital letter upsilon,
U+03A5 ISOgrk3 -->
<!ENTITY Phi "Φ" ><!-- greek capital letter phi, U+03A6 ISOgrk3 -->
<!ENTITY Chi "Χ" ><!-- greek capital letter chi, U+03A7 -->
<!ENTITY Psi "Ψ" ><!-- greek capital letter psi, U+03A8 ISOgrk3 -->
<!ENTITY Omega "Ω" ><!-- greek capital letter omega, U+03A9 ISOgrk3 -->
<!ENTITY alpha "α" ><!-- greek small letter alpha, U+03B1 ISOgrk3 -->
<!ENTITY beta "β" ><!-- greek small letter beta, U+03B2 ISOgrk3 -->
<!ENTITY gamma "γ" ><!-- greek small letter gamma, U+03B3 ISOgrk3 -->
<!ENTITY delta "δ" ><!-- greek small letter delta, U+03B4 ISOgrk3 -->
<!ENTITY epsilon "ε" ><!-- greek small letter epsilon, U+03B5 ISOgrk3 -->
<!ENTITY zeta "ζ" ><!-- greek small letter zeta, U+03B6 ISOgrk3 -->
<!ENTITY eta "η" ><!-- greek small letter eta, U+03B7 ISOgrk3 -->
<!ENTITY theta "θ" ><!-- greek small letter theta, U+03B8 ISOgrk3 -->
<!ENTITY iota "ι" ><!-- greek small letter iota, U+03B9 ISOgrk3 -->
<!ENTITY kappa "κ" ><!-- greek small letter kappa, U+03BA ISOgrk3 -->
<!ENTITY lambda "λ" ><!-- greek small letter lambda, U+03BB ISOgrk3 -->
<!ENTITY mu "μ" ><!-- greek small letter mu, U+03BC ISOgrk3 -->
<!ENTITY nu "ν" ><!-- greek small letter nu, U+03BD ISOgrk3 -->
<!ENTITY xi "ξ" ><!-- greek small letter xi, U+03BE ISOgrk3 -->
<!ENTITY omicron "ο" ><!-- greek small letter omicron, U+03BF NEW -->
<!ENTITY pi "π" ><!-- greek small letter pi, U+03C0 ISOgrk3 -->
<!ENTITY rho "ρ" ><!-- greek small letter rho, U+03C1 ISOgrk3 -->
<!ENTITY sigmaf "ς" ><!-- greek small letter final sigma, U+03C2 ISOgrk3 -->
<!ENTITY sigma "σ" ><!-- greek small letter sigma, U+03C3 ISOgrk3 -->
<!ENTITY tau "τ" ><!-- greek small letter tau, U+03C4 ISOgrk3 -->
<!ENTITY upsilon "υ" ><!-- greek small letter upsilon, U+03C5 ISOgrk3 -->
<!ENTITY phi "φ" ><!-- greek small letter phi, U+03C6 ISOgrk3 -->
<!ENTITY chi "χ" ><!-- greek small letter chi, U+03C7 ISOgrk3 -->
<!ENTITY psi "ψ" ><!-- greek small letter psi, U+03C8 ISOgrk3 -->
<!ENTITY omega "ω" ><!-- greek small letter omega, U+03C9 ISOgrk3 -->
<!ENTITY thetasym "ϑ" ><!-- greek small letter theta symbol, U+03D1 NEW -->
<!ENTITY upsih "ϒ" ><!-- greek upsilon with hook symbol, U+03D2 NEW -->
<!ENTITY piv "ϖ" ><!-- greek pi symbol, U+03D6 ISOgrk3 -->
<!-- General Punctuation -->
<!ENTITY bull "•" ><!-- bullet = black small circle, U+2022 ISOpub -->
<!-- bullet is NOT the same as bullet operator, U+2219 -->
<!ENTITY hellip "…" ><!-- horizontal ellipsis = three dot leader, U+2026 ISOpub -->
<!ENTITY prime "′" ><!-- prime = minutes = feet, U+2032 ISOtech -->
<!ENTITY Prime "″" ><!-- double prime = seconds = inches, U+2033 ISOtech -->
<!ENTITY oline "‾" ><!-- overline = spacing overscore, U+203E NEW -->
<!ENTITY frasl "⁄" ><!-- fraction slash, U+2044 NEW -->
<!-- Letterlike Symbols -->
<!ENTITY weierp "℘" ><!-- script capital P = power set = Weierstrass p, U+2118 ISOamso -->
<!ENTITY image "ℑ" ><!-- blackletter capital I = imaginary part, U+2111 ISOamso -->
<!ENTITY real "ℜ" ><!-- blackletter capital R = real part symbol, U+211C ISOamso -->
<!ENTITY trade "™" ><!-- trade mark sign, U+2122 ISOnum -->
<!ENTITY alefsym "ℵ" ><!-- alef symbol = first transfinite cardinal, U+2135 NEW -->
<!-- alef symbol is NOT the same as hebrew letter alef, U+05D0 although
the same glyph could be used to depict both characters -->
<!-- Arrows -->
<!ENTITY larr "←" ><!-- leftwards arrow, U+2190 ISOnum -->
<!ENTITY uarr "↑" ><!-- upwards arrow, U+2191 ISOnum-->
<!ENTITY rarr "→" ><!-- rightwards arrow, U+2192 ISOnum -->
<!ENTITY darr "↓" ><!-- downwards arrow, U+2193 ISOnum -->
<!ENTITY harr "↔" ><!-- left right arrow, U+2194 ISOamsa -->
<!ENTITY crarr "↵" ><!-- downwards arrow with corner leftwards
= carriage return, U+21B5 NEW -->
<!ENTITY lArr "⇐" ><!-- leftwards double arrow, U+21D0 ISOtech -->
<!-- Unicode does not say that lArr is the same as the 'is implied by' arrow
but also does not have any other character for that function. So ? lArr can
be used for 'is implied by' as ISOtech suggests -->
<!ENTITY uArr "⇑" ><!-- upwards double arrow, U+21D1 ISOamsa -->
<!ENTITY rArr "⇒" ><!-- rightwards double arrow, U+21D2 ISOtech -->
<!-- Unicode does not say this is the 'implies' character but does not have
another character with this function so ?
rArr can be used for 'implies' as ISOtech suggests -->
<!ENTITY dArr "⇓" ><!-- downwards double arrow, U+21D3 ISOamsa -->
<!ENTITY hArr "⇔" ><!-- left right double arrow, U+21D4 ISOamsa -->
<!-- Mathematical Operators -->
<!ENTITY forall "∀" ><!-- for all, U+2200 ISOtech -->
<!ENTITY part "∂" ><!-- partial differential, U+2202 ISOtech -->
<!ENTITY exist "∃" ><!-- there exists, U+2203 ISOtech -->
<!ENTITY empty "∅" ><!-- empty set = null set, U+2205 ISOamso -->
<!ENTITY nabla "∇" ><!-- nabla = backward difference, U+2207 ISOtech -->
<!ENTITY isin "∈" ><!-- element of, U+2208 ISOtech -->
<!ENTITY notin "∉" ><!-- not an element of, U+2209 ISOtech -->
<!ENTITY ni "∋" ><!-- contains as member, U+220B ISOtech -->
<!-- should there be a more memorable name than 'ni'? -->
<!ENTITY prod "∏" ><!-- n-ary product = product sign, U+220F ISOamsb -->
<!-- prod is NOT the same character as U+03A0 'greek capital letter pi' though
the same glyph might be used for both -->
<!ENTITY sum "∑" ><!-- n-ary sumation, U+2211 ISOamsb -->
<!-- sum is NOT the same character as U+03A3 'greek capital letter sigma'
though the same glyph might be used for both -->
<!ENTITY minus "−" ><!-- minus sign, U+2212 ISOtech -->
<!ENTITY lowast "∗" ><!-- asterisk operator, U+2217 ISOtech -->
<!ENTITY radic "√" ><!-- square root = radical sign, U+221A ISOtech -->
<!ENTITY prop "∝" ><!-- proportional to, U+221D ISOtech -->
<!ENTITY infin "∞" ><!-- infinity, U+221E ISOtech -->
<!ENTITY ang "∠" ><!-- angle, U+2220 ISOamso -->
<!ENTITY and "∧" ><!-- logical and = wedge, U+2227 ISOtech -->
<!ENTITY or "∨" ><!-- logical or = vee, U+2228 ISOtech -->
<!ENTITY cap "∩" ><!-- intersection = cap, U+2229 ISOtech -->
<!ENTITY cup "∪" ><!-- union = cup, U+222A ISOtech -->
<!ENTITY int "∫" ><!-- integral, U+222B ISOtech -->
<!ENTITY there4 "∴" ><!-- therefore, U+2234 ISOtech -->
<!ENTITY sim "∼" ><!-- tilde operator = varies with = similar to, U+223C ISOtech -->
<!-- tilde operator is NOT the same character as the tilde, U+007E,
although the same glyph might be used to represent both -->
<!ENTITY cong "≅" ><!-- approximately equal to, U+2245 ISOtech -->
<!ENTITY asymp "≈" ><!-- almost equal to = asymptotic to, U+2248 ISOamsr -->
<!ENTITY ne "≠" ><!-- not equal to, U+2260 ISOtech -->
<!ENTITY equiv "≡" ><!-- identical to, U+2261 ISOtech -->
<!ENTITY le "≤" ><!-- less-than or equal to, U+2264 ISOtech -->
<!ENTITY ge "≥" ><!-- greater-than or equal to, U+2265 ISOtech -->
<!ENTITY sub "⊂" ><!-- subset of, U+2282 ISOtech -->
<!ENTITY sup "⊃" ><!-- superset of, U+2283 ISOtech -->
<!-- note that nsup, 'not a superset of, U+2283' is not covered by the Symbol
font encoding and is not included. Should it be, for symmetry?
It is in ISOamsn -->
<!ENTITY nsub "⊄" ><!-- not a subset of, U+2284 ISOamsn -->
<!ENTITY sube "⊆" ><!-- subset of or equal to, U+2286 ISOtech -->
<!ENTITY supe "⊇" ><!-- superset of or equal to, U+2287 ISOtech -->
<!ENTITY oplus "⊕" ><!-- circled plus = direct sum, U+2295 ISOamsb -->
<!ENTITY otimes "⊗" ><!-- circled times = vector product, U+2297 ISOamsb -->
<!ENTITY perp "⊥" ><!-- up tack = orthogonal to = perpendicular, U+22A5 ISOtech -->
<!ENTITY sdot "⋅" ><!-- dot operator, U+22C5 ISOamsb -->
<!-- dot operator is NOT the same character as U+00B7 middle dot -->
<!-- Miscellaneous Technical -->
<!ENTITY lceil "⌈" ><!-- left ceiling = apl upstile, U+2308 ISOamsc -->
<!ENTITY rceil "⌉" ><!-- right ceiling, U+2309 ISOamsc -->
<!ENTITY lfloor "⌊" ><!-- left floor = apl downstile, U+230A ISOamsc -->
<!ENTITY rfloor "⌋" ><!-- right floor, U+230B ISOamsc -->
<!ENTITY lang "〈" ><!-- left-pointing angle bracket = bra, U+2329 ISOtech -->
<!-- lang is NOT the same character as U+003C 'less than'
or U+2039 'single left-pointing angle quotation mark' -->
<!ENTITY rang "〉" ><!-- right-pointing angle bracket = ket, U+232A ISOtech -->
<!-- rang is NOT the same character as U+003E 'greater than'
or U+203A 'single right-pointing angle quotation mark' -->
<!-- Geometric Shapes -->
<!ENTITY loz "◊" ><!-- lozenge, U+25CA ISOpub -->
<!-- Miscellaneous Symbols -->
<!ENTITY spades "♠" ><!-- black spade suit, U+2660 ISOpub -->
<!-- black here seems to mean filled as opposed to hollow -->
<!ENTITY clubs "♣" ><!-- black club suit = shamrock, U+2663 ISOpub -->
<!ENTITY hearts "♥" ><!-- black heart suit = valentine, U+2665 ISOpub -->
<!ENTITY diams "♦" ><!-- black diamond suit, U+2666 ISOpub -->
<!-- end of xhtml-symbol.ent -->
"""
return text
def get_apache_license():
license = r"""/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
return license
main()
|
dmitrijus/hltd
|
refs/heads/master
|
lib/urllib3-1.10/urllib3_hltd/exceptions.py
|
214
|
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
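# Illustrative handling of the hierarchy above ("pool" and "log" are
# hypothetical objects belonging to the caller, not this module):
#
#     try:
#         response = pool.urlopen('GET', '/')
#     except MaxRetryError as exc:
#         log.warning('giving up: %s', exc.reason)
#     except TimeoutError:
#         log.warning('socket timed out')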
|
liavkoren/djangoDev
|
refs/heads/master
|
tests/many_to_one_null/models.py
|
38
|
"""
16. Many-to-one relationships that can be null
To define a many-to-one relationship that can have a null foreign key, use
``ForeignKey()`` with ``null=True``.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
reporter = models.ForeignKey(Reporter, null=True)
class Meta:
ordering = ('headline',)
def __str__(self):
return self.headline
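# Illustrative usage of the nullable relation above (hypothetical shell
# session, not part of the test models):
#
#     r = Reporter.objects.create(name='John Smith')
#     Article.objects.create(headline='First', reporter=r)
#     Article.objects.create(headline='Orphan', reporter=None)  # null FK allowed
#     Article.objects.filter(reporter__isnull=True)  # -> [<Article: Orphan>]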
|
jjmleiro/hue
|
refs/heads/master
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/config.py
|
56
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import CONFIGNS
from .element import Element
# Autogenerated
def ConfigItem(**args):
return Element(qname = (CONFIGNS, 'config-item'), **args)
def ConfigItemMapEntry(**args):
return Element(qname = (CONFIGNS,'config-item-map-entry'), **args)
def ConfigItemMapIndexed(**args):
return Element(qname = (CONFIGNS,'config-item-map-indexed'), **args)
def ConfigItemMapNamed(**args):
return Element(qname = (CONFIGNS,'config-item-map-named'), **args)
def ConfigItemSet(**args):
return Element(qname = (CONFIGNS, 'config-item-set'), **args)
|
matthewrmshin/cylc
|
refs/heads/master
|
lib/cylc/network/scan.py
|
2
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Port scan utilities."""
import asyncio
import os
from pwd import getpwall
import re
import sys
import socket
from cylc.cfgspec.glbl_cfg import glbl_cfg
import cylc.flags
from cylc.hostuserutil import is_remote_host, get_host_ip_by_name
from cylc.network.client import (
SuiteRuntimeClient, ClientError, ClientTimeout)
from cylc.suite_srv_files_mgr import (
SuiteSrvFilesManager, SuiteServiceFileError)
DEBUG_DELIM = '\n' + ' ' * 4
INACTIVITY_TIMEOUT = 10.0
MSG_QUIT = "QUIT"
MSG_TIMEOUT = "TIMEOUT"
SLEEP_INTERVAL = 0.01
def async_map(coroutine, iterator):
"""Map iterator iterator onto a coroutine.
* Yields results in order as and when they are ready.
* Slow workers can block.
Args:
coroutine (asyncio.coroutine):
I.E. an async function.
iterator (iter):
Should yield tuples to be passed into the coroutine.
Yields:
list - List of results.
Example:
>>> async def square(number): return number ** 2
>>> generator = async_map(square, ((i,) for i in range(5)))
>>> list(generator)
[0, 1, 4, 9, 16]
"""
loop = asyncio.get_event_loop()
awaiting = []
for ind, args in enumerate(iterator):
task = loop.create_task(coroutine(*args))
task.ind = ind
awaiting.append(task)
index = 0
completed_tasks = {}
while awaiting:
completed, awaiting = loop.run_until_complete(
asyncio.wait(awaiting, return_when=asyncio.FIRST_COMPLETED))
completed_tasks.update({t.ind: t.result() for t in completed})
changed = True
while changed and completed_tasks:
if index in completed_tasks:
yield completed_tasks.pop(index)
changed = True
index += 1
def async_unordered_map(coroutine, iterator):
"""Map iterator iterator onto a coroutine.
Args:
coroutine (asyncio.coroutine):
I.E. an async function.
iterator (iter):
Should yield tuples to be passed into the coroutine.
Yields:
tuple - (args, result)
Example:
>>> async def square(number): return number ** 2
>>> generator = async_unordered_map(square, ((i,) for i in range(5)))
>>> sorted(list(generator))
[((0,), 0), ((1,), 1), ((2,), 4), ((3,), 9), ((4,), 16)]
"""
loop = asyncio.get_event_loop()
awaiting = []
for args in iterator:
task = loop.create_task(coroutine(*args))
task.args = args
awaiting.append(task)
while awaiting:
completed, awaiting = loop.run_until_complete(
asyncio.wait(awaiting, return_when=asyncio.FIRST_COMPLETED))
for task in completed:
yield (task.args, task.result())
def scan_many(items, methods=None, timeout=None, ordered=False):
"""Call "identify" method of suites on many host:port.
Args:
items (list): list of (reg, host, port) tuples to scan.
methods (list): list of 'method' string to be executed when scanning.
timeout (float): connection timeout, default is CONNECT_TIMEOUT.
ordered (bool): whether to scan items in order or not (default).
Yields:
tuple: (reg, host, port, identify_result)
"""
args = ((reg, host, port, timeout, methods) for reg, host, port in items)
if ordered:
yield from async_map(scan_one, args)
else:
yield from (
result for _, result in async_unordered_map(scan_one, args))
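# Illustrative usage of scan_many (hypothetical registration/host/port values):
#
#     items = [('my.suite', 'localhost', 43001)]
#     for reg, host, port, info in scan_many(items, methods=['identify']):
#         print(reg, host, port, info)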
async def scan_one(reg, host, port, timeout=None, methods=None):
if not methods:
methods = ['identify']
if is_remote_host(host):
try:
host = get_host_ip_by_name(host) # IP reduces DNS traffic
except socket.error as exc:
if cylc.flags.debug:
raise
sys.stderr.write("ERROR: %s: %s\n" % (exc, host))
return (reg, host, port, None)
# NOTE: Connect to the suite by host:port, this way the
# SuiteRuntimeClient will not attempt to check the contact file
# which would be unnecessary as we have already done so.
# NOTE: This part of the scan *is* IO blocking.
client = SuiteRuntimeClient(reg, host=host, port=port, timeout=timeout)
result = {}
for method in methods:
# work our way up the chain of identity methods, extract as much
# information as we can before the suite rejects us
try:
msg = await client.async_request(method)
except ClientTimeout as exc:
return (reg, host, port, MSG_TIMEOUT)
except ClientError as exc:
return (reg, host, port, result or None)
else:
result.update(msg)
return (reg, host, port, result)
def re_compile_filters(patterns_owner=None, patterns_name=None):
"""Compile regexp for suite owner and suite name scan filters.
Arguments:
patterns_owner (list): List of suite owner patterns
patterns_name (list): List of suite name patterns
Returns (tuple):
A 2-element tuple in the form (cre_owner, cre_name). Either or both
element can be None to allow for the default scan behaviour.
"""
cres = {'owner': None, 'name': None}
for label, items in [('owner', patterns_owner), ('name', patterns_name)]:
if items:
cres[label] = r'\A(?:' + r')|(?:'.join(items) + r')\Z'
try:
cres[label] = re.compile(cres[label])
except re.error:
raise ValueError(r'%s=%s: bad regexp' % (label, items))
return (cres['owner'], cres['name'])
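# Illustrative usage (hypothetical patterns):
#
#     cre_owner, cre_name = re_compile_filters(None, ['my\.suite.*'])
#     if cre_name and cre_name.match('my.suite.1'):
#         pass  # the suite name passes the filter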
def get_scan_items_from_fs(owner_pattern=None, reg_pattern=None):
"""Scrape list of suites from the filesystem.
Walk users' "~/cylc-run/" to get (host, port) from ".service/contact" for
active suites.
Yields:
tuple - (reg, host, port)
"""
srv_files_mgr = SuiteSrvFilesManager()
if owner_pattern is None:
# Run directory of current user only
run_dirs = [(glbl_cfg().get_host_item('run directory'), None)]
else:
# Run directory of all users matching "owner_pattern".
# But skip those with /nologin or /false shells
run_dirs = []
skips = ('/false', '/nologin')
for pwent in getpwall():
if any(pwent.pw_shell.endswith(s) for s in (skips)):
continue
if owner_pattern.match(pwent.pw_name):
run_dirs.append((
glbl_cfg().get_host_item(
'run directory',
owner=pwent.pw_name,
owner_home=pwent.pw_dir),
pwent.pw_name))
if cylc.flags.debug:
sys.stderr.write('Listing suites:%s%s\n' % (
DEBUG_DELIM, DEBUG_DELIM.join(item[1] for item in run_dirs if
item[1] is not None)))
for run_d, owner in run_dirs:
for dirpath, dnames, _ in os.walk(run_d, followlinks=True):
# Always descend for top directory, but
# don't descend further if it has a .service/ or log/ dir
if dirpath != run_d and (
srv_files_mgr.DIR_BASE_SRV in dnames or 'log' in dnames):
dnames[:] = []
# Filter suites by name
reg = os.path.relpath(dirpath, run_d)
if reg_pattern and not reg_pattern.match(reg):
continue
# Choose only suites with .service and matching filter
try:
contact_data = srv_files_mgr.load_contact_file(reg, owner)
except (SuiteServiceFileError, IOError, TypeError, ValueError):
continue
else:
yield (
reg,
contact_data[srv_files_mgr.KEY_HOST],
contact_data[srv_files_mgr.KEY_PORT]
)
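# Illustrative usage: list active suites of the current user.
#
#     for reg, host, port in get_scan_items_from_fs():
#         print(reg, host, port)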
|
Hellowlol/PyTunes
|
refs/heads/master
|
libs/cherrypy/test/test_httplib.py
|
42
|
"""Tests for cherrypy/lib/httputil.py."""
import unittest
from cherrypy.lib import httputil
class UtilityTests(unittest.TestCase):
def test_urljoin(self):
# Test all slash+atom combinations for SCRIPT_NAME and PATH_INFO
self.assertEqual(httputil.urljoin("/sn/", "/pi/"), "/sn/pi/")
self.assertEqual(httputil.urljoin("/sn/", "/pi"), "/sn/pi")
self.assertEqual(httputil.urljoin("/sn/", "/"), "/sn/")
self.assertEqual(httputil.urljoin("/sn/", ""), "/sn/")
self.assertEqual(httputil.urljoin("/sn", "/pi/"), "/sn/pi/")
self.assertEqual(httputil.urljoin("/sn", "/pi"), "/sn/pi")
self.assertEqual(httputil.urljoin("/sn", "/"), "/sn/")
self.assertEqual(httputil.urljoin("/sn", ""), "/sn")
self.assertEqual(httputil.urljoin("/", "/pi/"), "/pi/")
self.assertEqual(httputil.urljoin("/", "/pi"), "/pi")
self.assertEqual(httputil.urljoin("/", "/"), "/")
self.assertEqual(httputil.urljoin("/", ""), "/")
self.assertEqual(httputil.urljoin("", "/pi/"), "/pi/")
self.assertEqual(httputil.urljoin("", "/pi"), "/pi")
self.assertEqual(httputil.urljoin("", "/"), "/")
self.assertEqual(httputil.urljoin("", ""), "/")
if __name__ == '__main__':
unittest.main()
|
gusano/supercollider
|
refs/heads/develop
|
editors/sced/sced/__init__.py
|
46
|
# sced (SuperCollider mode for gedit)
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gedit
import gtk
from WindowHelper import WindowHelper
from Settings import Settings
from ConfigurationDialog import ConfigurationDialog
class ScedPlugin(gedit.Plugin):
def __init__(self):
gedit.Plugin.__init__(self)
self.settings = Settings()
self.__instances = {}
def activate(self, window):
self.__instances[window] = WindowHelper(self, window)
def deactivate(self, window):
self.__instances[window].deactivate()
del self.__instances[window]
def update_ui(self, window):
self.__instances[window].update_ui()
def create_configure_dialog(self):
dialog = ConfigurationDialog(self)
return dialog
|
mahak/keystone
|
refs/heads/master
|
keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy as sql
_USER_TABLE_NAME = 'user'
_USER_NAME_COLUMN_NAME = 'name'
_USER_DOMAINID_COLUMN_NAME = 'domain_id'
_USER_PASSWORD_COLUMN_NAME = 'password' # nosec
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
user_table = sql.Table(_USER_TABLE_NAME, meta, autoload=True)
# NOTE(gnuoy): the `domain_id` unique constraint is not guaranteed to
# have a fixed name, such as `ixu_user_name_domain_id`, so we need to
# search for the correct constraint that only affects
# user_table.c.domain_id and drop that constraint. (Fix based on
# morganfainberg's fix in 088_domain_specific_roles.py)
#
# This is an idempotent change that reflects the fix to migration
# 91 if the user name & domain_id unique constraint was not named
# consistently and someone manually fixed the migrations / db
# without dropping the old constraint.
# This is a copy of migration 97 to catch any/all deployments that
# are close to master. migration 97 will be backported to
# stable/mitaka.
to_drop = None
if migrate_engine.name == 'mysql':
for index in user_table.indexes:
if (index.unique and len(index.columns) == 2 and
_USER_DOMAINID_COLUMN_NAME in index.columns and
_USER_NAME_COLUMN_NAME in index.columns):
to_drop = index
break
else:
for index in user_table.constraints:
if (len(index.columns) == 2 and
_USER_DOMAINID_COLUMN_NAME in index.columns and
_USER_NAME_COLUMN_NAME in index.columns):
to_drop = index
break
# remove domain_id and name unique constraint
if to_drop is not None:
migrate.UniqueConstraint(user_table.c.domain_id,
user_table.c.name,
name=to_drop.name).drop()
# If migration 91 was aborted due to Bug #1572341 then columns may not
# have been dropped.
if _USER_DOMAINID_COLUMN_NAME in user_table.c:
user_table.c.domain_id.drop()
if _USER_NAME_COLUMN_NAME in user_table.c:
user_table.c.name.drop()
if _USER_PASSWORD_COLUMN_NAME in user_table.c:
user_table.c.password.drop()
|
brianloveswords/django-badger
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
setup(
name='django-badger',
version='0.0.1',
description='Django app for managing and awarding badges',
long_description=open('README.rst').read(),
author='Leslie Michael Orchard',
author_email='me@lmorchard.com',
url='http://github.com/lmorchard/django-badger',
license='BSD',
packages=['badger', 'badger.templatetags', 'badger.management', 'badger.management.commands', 'badger.migrations'],
package_data={'badger': ['fixtures/*', 'templates/badger_playdoh/*.html', 'templates/badger_playdoh/includes/*.html', 'templates/badger_vanilla/*.html', 'templates/badger_vanilla/includes/*.html']},
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
# I don't know what exactly this means, but why not?
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
dcowden/cadquery-freecad-module
|
refs/heads/master
|
CadQuery/Libs/pyqode/core/dialogs/encodings.py
|
3
|
"""
This module contains some dialogs to help you manage encodings in
your application.
"""
import locale
from pyqode.core.api import encodings
from pyqode.qt import QtCore, QtWidgets, QtGui
from pyqode.core.cache import Cache
from pyqode.core._forms import dlg_preferred_encodings_editor_ui
class DlgPreferredEncodingsEditor(QtWidgets.QDialog):
"""
This dialog is used to edit the preferred encodings that appear in
the encodings menu/combo box.
"""
def __init__(self, parent=None):
super(DlgPreferredEncodingsEditor, self).__init__(parent)
self.ui = dlg_preferred_encodings_editor_ui.Ui_Dialog()
self.ui.setupUi(self)
self._load_preferred()
self.ui.pushButtonAdd.clicked.connect(self._add)
self.ui.pushButtonAdd.setIcon(QtGui.QIcon.fromTheme(
"go-next", QtGui.QIcon(':/pyqode-icons/rc/go-next.png')))
self.ui.pushButtonRemove.clicked.connect(self._remove)
self.ui.pushButtonRemove.setIcon(QtGui.QIcon.fromTheme(
"go-previous", QtGui.QIcon(':/pyqode-icons/rc/go-previous.png')))
def _load_available(self):
self.ui.tableWidgetAvailable.setColumnCount(2)
self.ui.tableWidgetAvailable.setSelectionMode(
self.ui.tableWidgetAvailable.SingleSelection)
self.ui.tableWidgetAvailable.setSelectionBehavior(
self.ui.tableWidgetAvailable.SelectRows)
self.ui.tableWidgetAvailable.setHorizontalHeaderLabels([
'Encoding', 'Language'])
self.ui.tableWidgetAvailable.verticalHeader().hide()
self.ui.tableWidgetAvailable.setSortingEnabled(True)
preferred = Cache().preferred_encodings
for key in sorted(encodings.ENCODINGS_MAP.keys()):
value = encodings.ENCODINGS_MAP[key]
if key not in preferred:
# lang_item.setData(QtCore.Qt.UserRole, key)
row = self.ui.tableWidgetAvailable.rowCount()
self.ui.tableWidgetAvailable.insertRow(row)
for column in range(2):
item = QtWidgets.QTableWidgetItem(value[column].strip())
item.setData(QtCore.Qt.UserRole, key)
# item.setData(QtCore.Qt.UserRole, key)
self.ui.tableWidgetAvailable.setItem(row, column, item)
self.ui.tableWidgetAvailable.sortByColumn(0, QtCore.Qt.AscendingOrder)
def _load_preferred(self):
self._load_available() # setup preferred encodings
self.ui.tableWidgetPreferred.setColumnCount(2)
self.ui.tableWidgetPreferred.setSelectionMode(
self.ui.tableWidgetPreferred.SingleSelection)
self.ui.tableWidgetPreferred.setSelectionBehavior(
self.ui.tableWidgetPreferred.SelectRows)
self.ui.tableWidgetPreferred.setHorizontalHeaderLabels([
'Encoding', 'Language'])
self.ui.tableWidgetPreferred.verticalHeader().hide()
self.ui.tableWidgetPreferred.setSortingEnabled(True)
for i, encoding in enumerate(Cache().preferred_encodings):
encoding = encodings.convert_to_codec_key(encoding)
value = encodings.ENCODINGS_MAP[encoding]
row = self.ui.tableWidgetPreferred.rowCount()
self.ui.tableWidgetPreferred.insertRow(row)
for column in range(2):
item = QtWidgets.QTableWidgetItem(value[column].strip())
item.setData(QtCore.Qt.UserRole, encoding)
self.ui.tableWidgetPreferred.setItem(row, column, item)
self.ui.tableWidgetPreferred.sortByColumn(0, QtCore.Qt.AscendingOrder)
def _transfer_selected_items(self, source, destination):
# keeping sorting enabled causes a bug for the second transferred item
destination.setSortingEnabled(False)
row = source.currentRow()
if row != -1:
# take items from source
items = []
encoding = source.item(row, 0).data(QtCore.Qt.UserRole)
is_locale = encoding == encodings.convert_to_codec_key(
locale.getpreferredencoding())
if source == self.ui.tableWidgetPreferred and is_locale:
destination.setSortingEnabled(True)
return
for i in range(2):
items.append(source.takeItem(row, i))
source.removeRow(row)
# insert the taken items into a new row in the destination
row = destination.rowCount()
destination.insertRow(row)
for col, item in enumerate(items):
item = QtWidgets.QTableWidgetItem(item)
destination.setItem(row, col, item)
destination.setSortingEnabled(True)
def _add(self):
self._transfer_selected_items(self.ui.tableWidgetAvailable,
self.ui.tableWidgetPreferred)
def _remove(self):
self._transfer_selected_items(self.ui.tableWidgetPreferred,
self.ui.tableWidgetAvailable)
def get_preferred_encodings(self):
"""
Gets the list of preferred encodings.
:return: list
"""
encodings = []
for row in range(self.ui.tableWidgetPreferred.rowCount()):
item = self.ui.tableWidgetPreferred.item(row, 0)
encodings.append(item.data(QtCore.Qt.UserRole))
return encodings
@classmethod
def edit_encoding(cls, parent):
"""
        Static helper method that shows the encoding editor dialog.
        If the dialog was accepted, the new encodings are added to the settings.
        :param parent: parent widget
        :return: True in case of success, False otherwise
"""
dlg = cls(parent)
if dlg.exec_() == dlg.Accepted:
settings = Cache()
settings.preferred_encodings = dlg.get_preferred_encodings()
return True
return False
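# Hedged usage sketch (the enclosing dialog class name and "main_window" are
# assumptions; the class is the encodings editor whose setup appears above):
#     if DlgPreferredEncodingsEditor.edit_encoding(parent=main_window):
#         print(Cache().preferred_encodings)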
class DlgEncodingsChoice(QtWidgets.QDialog):
"""
    This dialog asks the user to choose an encoding from a combo box.
    You can use it when a decoding error occurs while opening a file and you
    are not using the encoding panel.
"""
def __init__(self, parent, path, encoding):
super(DlgEncodingsChoice, self).__init__(parent)
self.setWindowTitle('Choose encoding')
# avoid circular references with CodeEdit
from pyqode.core._forms import dlg_encodings_ui
self.ui = dlg_encodings_ui.Ui_Dialog()
self.ui.setupUi(self)
self.ui.comboBoxEncodings.current_encoding = encoding
self.ui.lblDescription.setText(
self.ui.lblDescription.text() %
('There was a problem opening the file %r with encoding: %s' %
(path, encoding)))
@classmethod
def choose_encoding(cls, parent, path, encoding):
"""
        Show the encodings dialog and return the user's choice.
:param parent: parent widget.
:param path: file path
:param encoding: current file encoding
:return: selected encoding
"""
dlg = cls(parent, path, encoding)
dlg.exec_()
return dlg.ui.comboBoxEncodings.current_encoding
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
new_encoding = DlgEncodingsChoice.choose_encoding(None, __file__, 'utf-8')
print(new_encoding)
|
open-keychain/SafeSlinger-AppEngine
|
refs/heads/openkeychain-master
|
safeslinger-demo/python/syncData.py
|
2
|
# The MIT License (MIT)
#
# Copyright (c) 2010-2015 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import json
import logging
import os
import struct
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import member
class SyncData(webapp.RequestHandler):
isJson = False
def post(self):
self.response.headers.add_header("Access-Control-Allow-Origin", "*")
header = self.request.headers['Content-Type']
logging.debug("Content-Type: '%s'" % header)
if (str(header).startswith('text/plain')):
self.isJson = True
# set response to json
self.response.headers['Content-Type'] = 'text/plain'
data_dict = json.loads(self.request.body)
else:
self.response.headers['Content-Type'] = 'application/octet-stream'
STR_VERSERVER = '01060000'
INT_VERCLIENT = 0x01060000
STR_VERCLIENT = '1.6'
if not os.environ.has_key('HTTPS'):
self.resp_simple(0, 'HTTPS environment variable not found')
return
if not os.environ.has_key('CURRENT_VERSION_ID'):
self.resp_simple(0, 'CURRENT_VERSION_ID environment variable not found')
return
HTTPS = os.environ.get('HTTPS', 'off')
CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID', STR_VERSERVER)
# SSL must be enabled
if HTTPS.__str__() != 'on':
self.resp_simple(0, 'Secure socket required.')
return
minlen = 4 + 4 + 4 + 4
# get the data from the post
data = self.request.body
logging.debug("in body '%s'" % data)
size = str.__len__(data)
logging.debug("in size %d" % size)
if size < minlen:
self.resp_simple(0, 'Request was formatted incorrectly.')
return
# unpack all incoming data
server = int(CURRENT_VERSION_ID[0:8], 16)
if self.isJson:
client = int(data_dict['ver_client'], 10)
else:
client = (struct.unpack("!i", data[0:4]))[0]
logging.debug("in client %d" % client)
if self.isJson:
usrid = int(data_dict['usrid'], 10)
else:
usrid = (struct.unpack("!i", data[4:8]))[0]
logging.debug("in usrid %d" % usrid)
if self.isJson:
numEntry = data_dict['usrids'].__len__()
else:
numEntry = (struct.unpack("!i", data[8:12]))[0]
logging.debug("in numEntry %d" % numEntry)
pos = 12
expectedsize = 4 + 4 + 4 + (4 * numEntry)
# append enough entries to hold the expected data
usrids = []
i = 0
while numEntry > len(usrids):
if self.isJson:
otherid = int(data_dict['usrids'][i], 10)
i += 1
else:
otherid = struct.unpack("!i", data[pos:pos + 4])[0]
pos += 4
usrids.append(otherid)
logging.debug("in usrid known %i" % otherid)
postSelf = False
if self.isJson:
if 'data_b64' in data_dict:
newVal = base64.decodestring(data_dict['data_b64'])
postSelf = True
else:
if size > expectedsize:
newVal = data[pos:]
postSelf = True
if postSelf:
logging.debug("in data '%s'" % newVal)
# client version check
if client < INT_VERCLIENT:
self.resp_simple(0, ('Client version mismatch; %s required. Download latest client release first.' % STR_VERCLIENT))
return
# verify you have an existing group
query = member.Member.all()
query.filter('usr_id =', usrid)
num = query.count()
# user exists
if num == 1:
mem = query.get()
usridlink = mem.usr_id_link
# add data...
if postSelf:
mem.data = newVal
mem.put()
key = mem.key()
if not key.has_id_or_name():
self.resp_simple(0, 'Unable to update user.')
return
# not posting signature, one must exist
else:
if mem.data == None:
self.resp_simple(0, 'Request was formatted incorrectly.')
return
# get the entries for the group
query = member.Member.all()
query.filter('usr_id_link =', usridlink)
mems = query.fetch(1000)
# version
if not self.isJson:
self.response.out.write('%s' % struct.pack('!i', server))
logging.debug("out server %i" % server)
# grand total
total = 0
for mem in mems:
if mem.data != None:
total = total + 1
if not self.isJson:
self.response.out.write('%s' % struct.pack('!i', total))
logging.debug("out total datas %i" % total)
# add delta ids total
delta = 0
for mem in mems:
posted = False
for known in usrids:
if known == mem.usr_id:
posted = True
if (not posted) & (mem.data != None):
delta = delta + 1
if not self.isJson:
self.response.out.write('%s' % struct.pack('!i', delta))
logging.debug("out delta datas %i" % delta)
deltas = []
for mem in mems:
posted = False
for known in usrids:
if known == mem.usr_id:
posted = True
if (not posted) & (mem.data != None):
length = str.__len__(mem.data)
if self.isJson:
deltas.append({'usrid' : str(mem.usr_id), 'data_b64' : base64.encodestring(mem.data) })
else:
self.response.out.write('%s%s' % (struct.pack('!ii', mem.usr_id, length), mem.data))
logging.debug("out mem.usr_id %i" % mem.usr_id)
logging.debug("out mem.data length %i" % length)
logging.debug("out mem.data '%s'" % mem.data)
else:
self.resp_simple(0, 'user %i does not exist' % (usrid))
return
if self.isJson:
json.dump({"ver_server":str(server), "data_total":str(total), "data_deltas":deltas }, self.response.out)
def resp_simple(self, code, msg):
if self.isJson:
json.dump({"err_code":str(code), "err_msg":str(msg)}, self.response.out)
else:
self.response.out.write('%s%s' % (struct.pack('!i', code), msg))
if code == 0:
logging.error(msg)
def main():
STR_VERSERVER = '01060000'
CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID', STR_VERSERVER)
isProd = CURRENT_VERSION_ID[8:9] == 'p'
# Set the logging level in the main function
if isProd:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication([('/syncData', SyncData),
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
cuckoobox/cuckoo
|
refs/heads/master
|
cuckoo/data/analyzer/linux/lib/core/startup.py
|
1
|
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import logging
from lib.common.constants import PATHS
from lib.common.results import NetlogHandler
log = logging.getLogger()
def create_folders():
"""Create folders in PATHS."""
for name, folder in PATHS.items():
if os.path.exists(folder):
continue
try:
os.makedirs(folder)
except OSError:
pass
def init_logging():
"""Initialize logger."""
formatter = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s: %(message)s")
sh = logging.StreamHandler()
sh.setFormatter(formatter)
log.addHandler(sh)
nh = NetlogHandler()
nh.setFormatter(formatter)
log.addHandler(nh)
log.setLevel(logging.DEBUG)
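# Hedged usage sketch (assumption: the analyzer entry point, which lives
# outside this file, calls these helpers in this order):
#     create_folders()
#     init_logging()
#     log.info("analysis environment ready")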
|
leonsim/me
|
refs/heads/master
|
webapp/views/thread/__init__.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import simplejson as json
from quixote.errors import TraversalError, AccessError
from libs.template import st, stf
from webapp.models.group import Thread
from webapp.views import check_access
_q_exports = []
@check_access
def _q_lookup(req, id):
thread = Thread.get(id)
if thread:
return ThreadUI(req, thread)
return TraversalError("no such thread")
class ThreadUI(object):
_q_exports = ['remove']
def __init__(self, req, thread):
self.thread = thread
def _q_index(self, req):
thread = self.thread
return st('/thread/thread.html', **locals())
def remove(self, req):
if req.user and req.user.id == self.thread.user_id:
group = self.thread.group
Thread.remove(self.thread.id, req.user.id)
return req.redirect("/")
return AccessError("need permission")
|
ahwolf/ChicagoEnergyMap
|
refs/heads/master
|
fabfile/provision.py
|
1
|
"""
Functions for provisioning environments with fabtools (eat shit puppet!)
"""
# standard library
import sys
import copy
import os
from distutils.util import strtobool
# 3rd party
import fabric
from fabric.api import env, task, local, run, settings, cd, sudo, lcd
import fabtools
from fabtools.vagrant import vagrant_settings
# local
import decorators
import utils
@task
@decorators.needs_environment
def apt_get_update(max_age=86400*7):
"""refresh apt-get index if its more than max_age out of date
"""
with vagrant_settings(env.host_string):
try:
fabtools.require.deb.uptodate_index(max_age=max_age)
except AttributeError:
msg = (
"Looks like your fabtools is out of date. "
"Try updating fabtools first:\n"
" sudo pip install fabtools==0.17.0"
)
raise Exception(msg)
@task
@decorators.needs_environment
def python_packages():
"""install python packages"""
filename = os.path.join(utils.remote_project_root(), "REQUIREMENTS")
with vagrant_settings(env.host_string):
fabtools.require.python.requirements(filename, use_sudo=True)
@task
@decorators.needs_environment
def debian_packages():
"""install debian packages"""
# get the list of packages
filename = os.path.join(utils.project_root(), "REQUIREMENTS-DEB")
with open(filename, 'r') as stream:
packages = stream.read().strip().splitlines()
# install them all with fabtools.
with vagrant_settings(env.host_string):
fabtools.require.deb.packages(packages)
@task
@decorators.needs_environment
def packages():
"""install all packages"""
debian_packages()
python_packages()
@task
@decorators.needs_environment
def setup_analysis():
"""prepare analysis environment"""
pass
# with vagrant_settings(env.host_string):
# # write a analysis.ini file that has the provider so we can
# # easily distinguish between development and production
# # environments when we run our analysis
# template = os.path.join(
# utils.fabfile_templates_root(),
# "server_config.ini",
# )
# fabtools.require.files.template_file(
# path="/vagrant/server_config.ini",
# template_source=template,
# context=env,
# )
# # create a data directory where all of the analysis and raw
# # data is stored.
# data_dir = "/vagrant/data"
# fabtools.require.files.directory(data_dir)
@task
@decorators.needs_environment
def setup_django(do_rsync=True):
"""setup django"""
with vagrant_settings(env.host_string):
# extract necessary configuration variables from INI file
parser = utils.get_config_parser()
mysql_root_password = parser.get('mysql', 'root_password')
django_username = parser.get('mysql', 'django_root_username')
django_password = parser.get('mysql', 'django_root_password')
django_db = parser.get('mysql', 'django_database')
facebook_id = parser.get('social', 'FACEBOOK_APP_ID')
# setup mysql
fabtools.require.mysql.server(password=mysql_root_password)
with settings(mysql_user='root', mysql_password=mysql_root_password):
fabtools.require.mysql.user(django_username, django_password)
fabtools.require.mysql.database(django_db,owner=django_username)
        # write the local django settings. since local.py is listed in
        # the .hgignore, the -C option to rsync excludes it, so this
        # needs to go AFTER rsyncing
# rsync directory to get all models, views, etc into the
# /srv/www directory.
#
# TODO: Use a soft link to the figures/templates directory to
# avoid unnecessary rsyncing of data from analysis?
site_name = "chicagoenergy.datascopeanalytics.com"
web_dir = "Map"
site_root = os.path.join("/srv", "www", site_name, web_dir)
fabtools.require.directory(site_root, owner="www-data", use_sudo=True)
if do_rsync:
sudo("rsync -avC --exclude='*.hg' /vagrant/%s %s" % (
web_dir, os.path.dirname(site_root)
))
for root_dir in ["/vagrant/" + web_dir, site_root]:
# make sure the dir exists (for the site_root one)
target_dir = root_dir+"/Map/settings/"
fabtools.require.directory(target_dir, owner="www-data", use_sudo=True)
# use_sudo is necessary (for the site_root one)
fabtools.require.files.template_file(
path=root_dir+"/Map/settings/local.py",
template_source=os.path.join(
utils.fabfile_templates_root(), "django_settings.py"
),
context={
"django_db": django_db,
"django_username": django_username,
"django_password": django_password,
"FACEBOOK_APP_ID": facebook_id,
},
use_sudo=True,
)
# collect the static files
with cd("/vagrant/Map"):
run("./manage.py collectstatic --noinput")
# make sure permissions are set up properly
#sudo("chmod -R a+w %s" % site_root)
sudo("chmod -R g+w %s" % site_root)
sudo("chgrp -R www-data %s" % site_root)
# # make sure permissions are set up properly
# #sudo("chmod -R a+w %s" % site_root)
# sudo("chmod -R g+w %s" % site_root)
# sudo("chgrp -R www-data %s" % site_root)
# make sure database is up and running
with cd("/vagrant/Map"):
run("./manage.py syncdb --noinput")
run("./manage.py migrate")
# setup apache
# fabtools.require.apache.module_enabled("mod_wsgi") # __future__
config_filename = os.path.join(
utils.fabfile_templates_root(),
"apache.conf",
)
fabtools.require.apache.site(
'chicagoenergy.datascopeanalytics.com',
template_source=config_filename,
wsgi_application_group=r"%{GLOBAL}",
site_name=site_name,
site_root=site_root,
)
fabtools.require.apache.disabled('default')
@task(default=True)
@decorators.needs_environment
def default(do_rsync=True):
"""run all provisioning tasks"""
# http://stackoverflow.com/a/19536667/564709
if isinstance(do_rsync, (str, unicode,)):
do_rsync = bool(strtobool(do_rsync))
# rsync files (Vagrant isn't doing any provisioning now)
if do_rsync:
local("vagrant provision %(host_string)s" % env)
# run all of these provisioning tasks in the order specified here
apt_get_update()
# install debian packages first to make sure any compiling python
# packages have necessary dependencies
packages()
# set up anything that needs to be done prior to running the
# analysis via make
setup_analysis()
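# Hedged usage sketch (assuming a standard Fabric 1.x command line and that an
# environment task, here called "dev", sets env.host_string as implied by
# decorators.needs_environment):
#     fab dev provision            # runs the default task above
#     fab dev provision.packages   # or run one provisioning step on its own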
|
replicatorg/ReplicatorG
|
refs/heads/master
|
skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/splodge.py
|
7
|
"""
This page is in the table of contents.
Splodge turns the extruder on just before the start of a thread. This is to give the extrusion a bit of anchoring at the beginning.
The splodge manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Splodge
==Operation==
The default 'Activate Splodge' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Initial===
====Initial Lift over Extra Thickness====
Default is one.
Defines the amount the extruder will be lifted over the extra thickness of the initial splodge thread. The higher the ratio, the more the extruder will be lifted over the splodge, if the ratio is too low the extruder might plow through the splodge extrusion.
====Initial Splodge Feed Rate====
Default is one millimeter per second.
Defines the feed rate at which the initial extra extrusion will be added. With the default feed rate, the splodge will be added slower so it will be thicker than the regular extrusion.
====Initial Splodge Quantity Length====
Default is thirty millimeters.
Defines the quantity length of extra extrusion at the operating feed rate that will be added to the initial thread. If a splodge quantity length is smaller than 0.1 times the edge width, no splodge of that type will be added.
===Operating===
====Operating Lift over Extra Thickness====
Default is one.
Defines the amount the extruder will be lifted over the extra thickness of the operating splodge thread.
====Operating Splodge Feed Rate====
Default is one millimeter per second.
Defines the feed rate at which the next extra extrusions will be added.
====Operating Splodge Quantity Length====
Default is thirty millimeters.
Defines the quantity length of extra extrusion at the operating feed rate that will be added for the next threads.
==Examples==
The following examples splodge the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and splodge.py.
> python splodge.py
This brings up the splodge dialog.
> python splodge.py Screw Holder Bottom.stl
The splodge tool is parsing the file:
Screw Holder Bottom.stl
..
The splodge tool has created the file:
.. Screw Holder Bottom_splodge.gcode
"""
from __future__ import absolute_import
# Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, splodgeRepository = None ):
"Splodge a gcode linear move file or text."
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, text), splodgeRepository )
def getCraftedTextFromText( gcodeText, splodgeRepository = None ):
"Splodge a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'splodge'):
return gcodeText
if splodgeRepository == None:
splodgeRepository = settings.getReadRepository( SplodgeRepository() )
if not splodgeRepository.activateSplodge.value:
return gcodeText
return SplodgeSkein().getCraftedGcode( gcodeText, splodgeRepository )
def getNewRepository():
'Get new repository.'
return SplodgeRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Splodge a gcode linear move file."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'splodge', shouldAnalyze)
class SplodgeRepository:
"A class to handle the splodge settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.splodge.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Splodge', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Splodge')
self.activateSplodge = settings.BooleanSetting().getFromValue('Activate Splodge', self, False )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Initial -', self )
self.initialLiftOverExtraThickness = settings.FloatSpin().getFromValue( 0.5, 'Initial Lift over Extra Thickness (ratio):', self, 1.5, 1.0 )
self.initialSplodgeFeedRate = settings.FloatSpin().getFromValue( 0.4, 'Initial Splodge Feed Rate (mm/s):', self, 2.4, 1.0 )
self.initialSplodgeQuantityLength = settings.FloatSpin().getFromValue( 10.0, 'Initial Splodge Quantity Length (millimeters):', self, 90.0, 30.0 )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Operating -', self )
self.operatingLiftOverExtraThickness = settings.FloatSpin().getFromValue( 0.5, 'Operating Lift over Extra Thickness (ratio):', self, 1.5, 1.0 )
self.operatingSplodgeFeedRate = settings.FloatSpin().getFromValue( 0.4, 'Operating Splodge Feed Rate (mm/s):', self, 2.4, 1.0 )
self.operatingSplodgeQuantityLength = settings.FloatSpin().getFromValue(0.4, 'Operating Splodge Quantity Length (millimeters):', self, 2.4, 1.0)
settings.LabelSeparator().getFromRepository(self)
self.executeTitle = 'Splodge'
def execute(self):
"Splodge button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class SplodgeSkein:
"A class to splodge a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.feedRateMinute = 961.0
self.isExtruderActive = False
self.hasInitialSplodgeBeenAdded = False
self.isLastExtruderCommandActivate = False
self.lastLineOutput = None
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.operatingFeedRatePerSecond = 15.0
def addLineUnlessIdentical(self, line):
"Add a line, unless it is identical to the last line."
if line == self.lastLineOutput:
return
self.lastLineOutput = line
self.distanceFeedRate.addLine(line)
def addLineUnlessIdenticalReactivate(self, line):
"Add a line, unless it is identical to the last line or another M101."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'M101':
if not self.isLastExtruderCommandActivate:
self.addLineUnlessIdentical(line)
self.isLastExtruderCommandActivate = True
return
if firstWord == 'M103':
self.isLastExtruderCommandActivate = False
self.addLineUnlessIdentical(line)
def getCraftedGcode( self, gcodeText, splodgeRepository ):
"Parse gcode text and store the splodge gcode."
self.lines = archive.getTextLines(gcodeText)
self.setRotations()
self.splodgeRepository = splodgeRepository
self.parseInitialization( splodgeRepository )
self.boundingRectangle = gcodec.BoundingRectangle().getFromGcodeLines( self.lines[self.lineIndex :], 0.5 * self.edgeWidth )
self.initialSplodgeFeedRateMinute = 60.0 * splodgeRepository.initialSplodgeFeedRate.value
self.initialStartupDistance = splodgeRepository.initialSplodgeQuantityLength.value * splodgeRepository.initialSplodgeFeedRate.value / self.operatingFeedRatePerSecond
self.operatingSplodgeFeedRateMinute = 60.0 * splodgeRepository.operatingSplodgeFeedRate.value
self.operatingStartupDistance = splodgeRepository.operatingSplodgeQuantityLength.value * splodgeRepository.operatingSplodgeFeedRate.value / self.operatingFeedRatePerSecond
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def getInitialSplodgeLine( self, line, location ):
"Add the initial splodge line."
if not self.isJustBeforeExtrusion():
return line
self.hasInitialSplodgeBeenAdded = True
if self.splodgeRepository.initialSplodgeQuantityLength.value < self.minimumQuantityLength:
return line
return self.getSplodgeLineGivenDistance( self.initialSplodgeFeedRateMinute, line, self.splodgeRepository.initialLiftOverExtraThickness.value, location, self.initialStartupDistance )
def getNextActiveLocationComplex(self):
"Get the next active line."
isActive = False
for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M101':
isActive = True
if firstWord == 'G1' and isActive:
return gcodec.getLocationFromSplitLine(self.oldLocation, splitLine).dropAxis()
return None
def getOperatingSplodgeLine( self, line, location ):
"Get the operating splodge line."
if not self.isJustBeforeExtrusion():
return line
if self.splodgeRepository.operatingSplodgeQuantityLength.value < self.minimumQuantityLength:
return line
return self.getSplodgeLineGivenDistance( self.operatingSplodgeFeedRateMinute, line, self.splodgeRepository.operatingLiftOverExtraThickness.value, location, self.operatingStartupDistance )
def getSplodgeLine(self, line, location, splitLine):
"Get splodged gcode line."
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
if self.hasInitialSplodgeBeenAdded:
return self.getOperatingSplodgeLine(line, location)
return self.getInitialSplodgeLine(line, location)
def getSplodgeLineGivenDistance( self, feedRateMinute, line, liftOverExtraThickness, location, startupDistance ):
"Add the splodge line."
locationComplex = location.dropAxis()
relativeStartComplex = None
nextLocationComplex = self.getNextActiveLocationComplex()
if nextLocationComplex != None:
if nextLocationComplex != locationComplex:
relativeStartComplex = locationComplex - nextLocationComplex
if relativeStartComplex == None:
relativeStartComplex = complex( 19.9, 9.9 )
if self.oldLocation != None:
oldLocationComplex = self.oldLocation.dropAxis()
if oldLocationComplex != locationComplex:
relativeStartComplex = oldLocationComplex - locationComplex
relativeStartComplex *= startupDistance / abs( relativeStartComplex )
startComplex = self.getStartInsideBoundingRectangle( locationComplex, relativeStartComplex )
feedRateMultiplier = feedRateMinute / self.operatingFeedRatePerSecond / 60.0
splodgeLayerThickness = self.layerHeight / math.sqrt( feedRateMultiplier )
extraLayerThickness = splodgeLayerThickness - self.layerHeight
lift = extraLayerThickness * liftOverExtraThickness
startLine = self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.feedRateMinute, startComplex, location.z + lift )
self.addLineUnlessIdenticalReactivate( startLine )
self.addLineUnlessIdenticalReactivate('M101')
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
lineLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.distanceFeedRate.addGcodeMovementZWithFeedRate( feedRateMinute, locationComplex, lineLocation.z + lift )
return ''
def getStartInsideBoundingRectangle( self, locationComplex, relativeStartComplex ):
"Get a start inside the bounding rectangle."
startComplex = locationComplex + relativeStartComplex
if self.boundingRectangle.isPointInside( startComplex ):
return startComplex
for rotation in self.rotations:
rotatedRelativeStartComplex = relativeStartComplex * rotation
startComplex = locationComplex + rotatedRelativeStartComplex
if self.boundingRectangle.isPointInside( startComplex ):
return startComplex
return startComplex
def isJustBeforeExtrusion(self):
"Determine if activate command is before linear move command."
for lineIndex in xrange(self.lineIndex + 1, len(self.lines)):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1' or firstWord == 'M103':
return False
if firstWord == 'M101':
return True
return False
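    # Hedged illustration of the gcode convention the method above relies on:
    # M101 activates the extruder and M103 deactivates it, so in a fragment like
    #     M101
    #     G1 X10.0 Y0.0 Z0.4 F960.0
    # the current point is "just before extrusion", while a G1 or M103 seen
    # first would make the check return False.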
def parseInitialization( self, splodgeRepository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.addLineUnlessIdenticalReactivate(gcodec.getTagBracketedProcedure('splodge'))
return
elif firstWord == '(<layerHeight>':
self.layerHeight = float(splitLine[1])
elif firstWord == '(<operatingFeedRatePerSecond>':
self.operatingFeedRatePerSecond = float(splitLine[1])
elif firstWord == '(<edgeWidth>':
self.edgeWidth = float(splitLine[1])
self.minimumQuantityLength = 0.1 * self.edgeWidth
self.addLineUnlessIdenticalReactivate(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
line = self.getSplodgeLine(line, location, splitLine)
self.oldLocation = location
elif firstWord == 'M101':
self.isExtruderActive = True
elif firstWord == 'M103':
self.isExtruderActive = False
self.addLineUnlessIdenticalReactivate(line)
def setRotations(self):
"Set the rotations."
self.rootHalf = math.sqrt( 0.5 )
self.rotations = []
self.rotations.append( complex( self.rootHalf, self.rootHalf ) )
self.rotations.append( complex( self.rootHalf, - self.rootHalf ) )
self.rotations.append( complex( 0.0, 1.0 ) )
self.rotations.append( complex(0.0, -1.0) )
self.rotations.append( complex( - self.rootHalf, self.rootHalf ) )
self.rotations.append( complex( - self.rootHalf, - self.rootHalf ) )
def main():
"Display the splodge dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
ModdedPA/android_external_chromium_org
|
refs/heads/kitkat
|
chrome/test/functional/pyauto_functional.py
|
56
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for PyAuto functional tests.
Use the following in your scripts to run them standalone:
# This should be at the top
import pyauto_functional
if __name__ == '__main__':
pyauto_functional.Main()
This script can be used as an executable to fire off other scripts, similar
to unittest.py
python pyauto_functional.py test_script
"""
import os
import subprocess
import sys
def _LocatePyAutoDir():
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, 'pyautolib'))
_LocatePyAutoDir()
import pyauto_paths
def RunWithCorrectPythonIfNecessary():
"""Runs this script with the correct version of python if necessary.
Different platforms and versions of pyautolib use different python versions.
Instead of requiring testers and infrastructure to handle choosing the right
version (and architecture), this will rerun the script with the correct
version of python.
Note, this function will either return after doing nothing, or will exit with
the subprocess's return code.
"""
def RunAgain():
"""Run the script again with the correct version of python.
Note, this function does not return, but exits with the return code of the
child.
"""
if sys.platform == 'cygwin' or sys.platform.startswith('win'):
cmd = [os.path.join(pyauto_paths.GetThirdPartyDir(), 'python_26',
'python_slave.exe')]
elif sys.platform.startswith('darwin'):
# Arch runs the specified architecture of a universal binary. Run
# the 32 bit one.
cmd = ['arch', '-i386', 'python2.6']
elif sys.platform.startswith('linux'):
cmd = ['python2.6']
cmd.extend(sys.argv)
print 'Running:', ' '.join(cmd)
proc = subprocess.Popen(cmd)
proc.wait()
sys.exit(proc.returncode)
def IsChromeOS():
lsb_release = '/etc/lsb-release'
if sys.platform.startswith('linux') and os.path.isfile(lsb_release):
with open(lsb_release) as fp:
contents = fp.read()
return 'CHROMEOS_RELEASE_NAME=' in contents
return False
# Ensure this is the right python version (2.6 for chrome, 2.7 for chromeOS).
if IsChromeOS():
if sys.version_info[0:2] != (2, 7):
cmd = ['python2.7'] + sys.argv
print 'Running: ', ' '.join(cmd)
proc = subprocess.Popen(cmd)
proc.wait()
else:
if sys.version_info[0:2] != (2, 6):
RunAgain()
# Check this is the right bitness on mac.
# platform.architecture() will not help us on mac, since multiple binaries
# are stuffed inside the universal python binary.
if sys.platform.startswith('darwin') and sys.maxint > 2**32:
# User is running 64-bit python, but we should use 32-bit.
RunAgain()
# Do not attempt to figure out python versions if
# DO_NOT_RESTART_PYTHON_FOR_PYAUTO is set.
if os.getenv('DO_NOT_RESTART_PYTHON_FOR_PYAUTO') is None:
RunWithCorrectPythonIfNecessary()
else:
print 'Will not try to restart with the correct version of python '\
'as DO_NOT_RESTART_PYTHON_FOR_PYAUTO is set.'
try:
import pyauto
except ImportError:
print >>sys.stderr, 'Cannot import pyauto from %s' % sys.path
raise
class Main(pyauto.Main):
"""Main program for running PyAuto functional tests."""
def __init__(self):
# Make scripts in this dir importable
sys.path.append(os.path.dirname(__file__))
pyauto.Main.__init__(self)
def TestsDir(self):
return os.path.dirname(__file__)
if __name__ == '__main__':
Main()
|
gustavo-guimaraes/siga
|
refs/heads/master
|
backend/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
1093
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
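    # Hedged sketch of the layout described above: after
    #     od = OrderedDict(); od['a'] = 1; od['b'] = 2
    # the sentinel and the per-key links form a circle
    #     root   = [link_b, link_a, None]
    #     link_a = [root,   link_b, 'a']
    #     link_b = [link_a, root,   'b']
    # and iteration simply follows the NEXT pointers from root back to root.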
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
kerstin/moviepy
|
refs/heads/master
|
moviepy/audio/fx/audio_fadeout.py
|
18
|
from moviepy.decorators import audio_video_fx, requires_duration
import numpy as np
@audio_video_fx
@requires_duration
def audio_fadeout(clip, duration):
""" Return a sound clip where the sound fades out progressively
over ``duration`` seconds at the end of the clip. """
def fading(gf,t):
gft = gf(t)
if np.isscalar(t):
factor = min(1.0 * (clip.duration - t) / duration, 1)
factor = np.array([factor,factor])
else:
factor = np.minimum( 1.0 * (clip.duration - t) / duration, 1)
factor = np.vstack([factor,factor]).T
return factor * gft
return clip.fl(fading, keep_duration = True)
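# Hedged usage sketch (the file name is an assumption for illustration):
#     from moviepy.audio.io.AudioFileClip import AudioFileClip
#     music = AudioFileClip("music.mp3")
#     faded = audio_fadeout(music, 3.0)  # fade to silence over the last 3 seconds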
|
olivierdalang/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/gdal/GridDataMetrics.py
|
16
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridDataMetrics.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridDataMetrics(GdalAlgorithm):
INPUT = 'INPUT'
Z_FIELD = 'Z_FIELD'
METRIC = 'METRIC'
RADIUS_1 = 'RADIUS_1'
RADIUS_2 = 'RADIUS_2'
MIN_POINTS = 'MIN_POINTS'
ANGLE = 'ANGLE'
NODATA = 'NODATA'
OPTIONS = 'OPTIONS'
EXTRA = 'EXTRA'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.metrics = ((self.tr('Minimum'), 'minimum'),
(self.tr('Maximum'), 'maximum'),
(self.tr('Range'), 'range'),
(self.tr('Count'), 'count'),
(self.tr('Average distance'), 'average_distance'),
(self.tr('Average distance between points'), 'average_distance_pts'))
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Point layer'),
[QgsProcessing.TypeVectorPoint]))
z_field_param = QgsProcessingParameterField(self.Z_FIELD,
self.tr('Z value from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True)
z_field_param.setFlags(z_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(z_field_param)
self.addParameter(QgsProcessingParameterEnum(self.METRIC,
self.tr('Data metric to use'),
options=[i[0] for i in self.metrics],
allowMultiple=False,
defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_1,
self.tr('The first radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_2,
self.tr('The second radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.ANGLE,
self.tr('Angle of search ellipse rotation in degrees (counter clockwise)'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
maxValue=360.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.MIN_POINTS,
self.tr('Minimum number of data points to use'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('NODATA marker to fill empty points'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated (data metrics)')))
def name(self):
return 'griddatametrics'
def displayName(self):
return self.tr('Grid (Data metrics)')
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def commandName(self):
return 'gdal_grid'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
arguments = ['-l']
arguments.append(layerName)
fieldName = self.parameterAsString(parameters, self.Z_FIELD, context)
if fieldName:
arguments.append('-zfield')
arguments.append(fieldName)
params = self.metrics[self.parameterAsEnum(parameters, self.METRIC, context)][1]
params += ':radius1={}'.format(self.parameterAsDouble(parameters, self.RADIUS_1, context))
params += ':radius2={}'.format(self.parameterAsDouble(parameters, self.RADIUS_2, context))
params += ':angle={}'.format(self.parameterAsDouble(parameters, self.ANGLE, context))
params += ':min_points={}'.format(self.parameterAsInt(parameters, self.MIN_POINTS, context))
params += ':nodata={}'.format(self.parameterAsDouble(parameters, self.NODATA, context))
arguments.append('-a')
arguments.append(params)
arguments.append('-ot')
arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
arguments.append(ogrLayer)
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
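# Hedged illustration (values are the parameter defaults above; the layer name is
# an assumption): for a point layer "points.shp" the assembled call resembles
#     gdal_grid -l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 \
#               -ot Float32 -of GTiff points.shp output.tif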
|
cherrydocker/minos
|
refs/heads/master
|
client/deploy_kafka.py
|
5
|
#!/usr/bin/env python
import argparse
import os
import parallel_deploy
import service_config
import subprocess
import sys
import urlparse
import deploy_utils
from log import Log
ALL_JOBS = ["kafka", "kafkascribe"]
def _get_kafka_service_config(args):
args.kafka_config = deploy_utils.get_service_config(args)
def generate_configs(args, job_name, host_id, instance_id):
kafka_cfg_dict = args.kafka_config.configuration.generated_files["kafka.cfg"]
hosts = args.kafka_config.jobs[job_name].hosts
kafka_cfg_dict["broker.id"] = deploy_utils.get_task_id(hosts, host_id, instance_id)
kafka_cfg = deploy_utils.generate_properties_file(args, kafka_cfg_dict)
kafka_scribe_cfg_dict = args.kafka_config.configuration.generated_files["kafka-scribe.cfg"]
kafka_job = args.kafka_config.jobs["kafka"]
kafka_scribe_cfg_dict["metadata.broker.list"] = ",".join(
service_config.get_job_host_port_list(kafka_job))
kafka_scribe_cfg = deploy_utils.generate_properties_file(args, kafka_scribe_cfg_dict)
config_files = {
"kafka.cfg": kafka_cfg,
"kafka-scribe.cfg": kafka_scribe_cfg,
}
config_files.update(args.kafka_config.configuration.raw_files)
return config_files
def generate_run_scripts_params(args, host, job_name, host_id, instance_id):
job = args.kafka_config.jobs[job_name]
supervisor_client = deploy_utils.get_supervisor_client(host,
"kafka", args.kafka_config.cluster.name, job_name, instance_id=instance_id)
artifact_and_version = "kafka-" + args.kafka_config.cluster.version
jar_dirs = "$package_dir/*"
log_level = deploy_utils.get_service_log_level(args, args.kafka_config)
params = job.get_arguments(args, args.kafka_config.cluster, args.kafka_config.jobs,
args.kafka_config.arguments_dict, job_name, host_id, instance_id)
script_dict = {
"artifact": artifact_and_version,
"job_name": job_name,
"jar_dirs": jar_dirs,
"run_dir": supervisor_client.get_run_dir(),
"params": params,
}
return script_dict
def generate_start_script(args, host, job_name, host_id, instance_id):
script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)
return deploy_utils.create_run_script(
"%s/start.sh.tmpl" % deploy_utils.get_template_dir(),
script_params)
def install(args):
_get_kafka_service_config(args)
deploy_utils.install_service(args, "kafka", args.kafka_config, "kafka")
def cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
deploy_utils.cleanup_job("kafka", args.kafka_config,
host, job_name, instance_id, cleanup_token)
def cleanup(args):
_get_kafka_service_config(args)
cleanup_token = deploy_utils.confirm_cleanup(args,
"kafka", args.kafka_config)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,
'cleanup', cleanup_token=cleanup_token)
parallel_deploy.start_deploy_threads(cleanup_job, task_list)
def bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):
# parse the service_config according to the instance_id
args.kafka_config.parse_generated_config_files(args, job_name, host_id, instance_id)
deploy_utils.bootstrap_job(args, "kafka", "kafka",
args.kafka_config, host, job_name, instance_id, cleanup_token, '0')
start_job(args, host, job_name, host_id, instance_id)
def bootstrap(args):
_get_kafka_service_config(args)
cleanup_token = deploy_utils.confirm_bootstrap("kafka", args.kafka_config)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,
'bootstrap', cleanup_token=cleanup_token)
parallel_deploy.start_deploy_threads(bootstrap_job, task_list)
def start_job(args, host, job_name, host_id, instance_id, is_wait=False):
if is_wait:
deploy_utils.wait_for_job_stopping("kafka",
args.kafka_config.cluster.name, job_name, host, instance_id)
# parse the service_config according to the instance_id
args.kafka_config.parse_generated_config_files(args, job_name, host_id, instance_id)
config_files = generate_configs(args, job_name, host_id, instance_id)
start_script = generate_start_script(args, host, job_name, host_id, instance_id)
http_url = deploy_utils.get_http_service_uri(host,
args.kafka_config.jobs[job_name].base_port, instance_id)
deploy_utils.start_job(args, "kafka", "kafka", args.kafka_config,
host, job_name, instance_id, start_script, http_url, **config_files)
def start(args):
if not args.skip_confirm:
deploy_utils.confirm_start(args)
_get_kafka_service_config(args)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')
parallel_deploy.start_deploy_threads(start_job, task_list)
def stop_job(args, host, job_name, instance_id):
deploy_utils.stop_job("kafka", args.kafka_config, host, job_name, instance_id)
def stop(args):
if not args.skip_confirm:
deploy_utils.confirm_stop(args)
_get_kafka_service_config(args)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')
parallel_deploy.start_deploy_threads(stop_job, task_list)
def restart(args):
if not args.skip_confirm:
deploy_utils.confirm_restart(args)
_get_kafka_service_config(args)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')
parallel_deploy.start_deploy_threads(stop_job, task_list)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,
'start', is_wait=True)
parallel_deploy.start_deploy_threads(start_job, task_list)
def show_job(args, host, job_name, instance_id):
deploy_utils.show_job("kafka", args.kafka_config, host, job_name, instance_id)
def show(args):
_get_kafka_service_config(args)
for job_name in args.job or ALL_JOBS:
hosts = args.kafka_config.jobs[job_name].hosts
task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')
parallel_deploy.start_deploy_threads(show_job, task_list)
def run_shell(args):
Log.print_critical("'shell' command is not supported!")
def pack(args):
Log.print_critical("'pack' command is not supported!")
def rolling_update(args):
if not args.job:
Log.print_critical("You must specify the job name to do rolling update")
_get_kafka_service_config(args)
job_name = args.job[0]
if not args.skip_confirm:
deploy_utils.confirm_action(args, "rolling_update")
Log.print_info("Rolling updating %s" % job_name)
hosts = args.kafka_config.jobs[job_name].hosts
wait_time = 0
args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
for host_id in args.task_map.keys() or hosts.iterkeys():
for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):
instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id
deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)
stop_job(args, hosts[host_id].ip, job_name, instance_id)
deploy_utils.wait_for_job_stopping("kafka",
args.kafka_config.cluster.name, job_name, hosts[host_id].ip, instance_id)
start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
deploy_utils.wait_for_job_starting("kafka",
args.kafka_config.cluster.name, job_name, hosts[host_id].ip, instance_id)
wait_time = args.time_interval
Log.print_success("Rolling updating %s success" % job_name)
if __name__ == '__main__':
test()
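# Hedged usage sketch (the client entry point name and cluster name are
# assumptions; the minos client is assumed to dispatch the subcommands above):
#     deploy install kafka <cluster>
#     deploy bootstrap kafka <cluster>
#     deploy rolling_update kafka <cluster> --job kafka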
|
ramaseshan/Spirit
|
refs/heads/master
|
spirit/topic/managers.py
|
6
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.shortcuts import get_object_or_404
from django.db.models import Q, Prefetch
from ..comment.bookmark.models import CommentBookmark
class TopicQuerySet(models.QuerySet):
def unremoved(self):
return self.filter(Q(category__parent=None) | Q(category__parent__is_removed=False),
category__is_removed=False,
is_removed=False)
def public(self):
return self.filter(category__is_private=False)
def visible(self):
return self.unremoved().public()
def opened(self):
return self.filter(is_closed=False)
def global_(self):
return self.filter(category__is_global=True)
def for_category(self, category):
if category.is_subcategory:
return self.filter(category=category)
return self.filter(Q(category=category) | Q(category__parent=category))
def _access(self, user):
return self.filter(Q(category__is_private=False) | Q(topics_private__user=user))
def for_access(self, user):
return self.unremoved()._access(user=user)
def for_unread(self, user):
return self.filter(topicunread__user=user,
topicunread__is_read=False)
def with_bookmarks(self, user):
if not user.is_authenticated():
return self
user_bookmarks = CommentBookmark.objects\
.filter(user=user)\
.select_related('topic')
prefetch = Prefetch("commentbookmark_set", queryset=user_bookmarks, to_attr='bookmarks')
return self.prefetch_related(prefetch)
def get_public_or_404(self, pk, user):
if user.is_authenticated() and user.st.is_moderator:
return get_object_or_404(self.public()
.select_related('category__parent'),
pk=pk)
else:
return get_object_or_404(self.visible()
.select_related('category__parent'),
pk=pk)
def for_update_or_404(self, pk, user):
if user.st.is_moderator:
return get_object_or_404(self.public(), pk=pk)
else:
return get_object_or_404(self.visible().opened(), pk=pk, user=user)
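# Hedged usage sketch, not part of Spirit itself: this queryset is normally attached
# to the Topic model via ``objects = TopicQuerySet.as_manager()``. The helper below
# only illustrates how the methods above chain together; ``topic_model`` is passed in
# to avoid assuming an import path.
def _example_topic_listing(topic_model, user):
    return (topic_model.objects
            .visible()
            .global_()
            .with_bookmarks(user=user))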
|
seibert/numba
|
refs/heads/master
|
numba/tests/test_sets.py
|
1
|
import unittest
from collections import namedtuple
import contextlib
import itertools
import math
import random
import sys
import numpy as np
from numba.core.compiler import compile_isolated, Flags, errors
from numba import jit
from numba.core import types

from numba.tests.support import (TestCase, enable_pyobj_flags, MemoryLeakMixin,
tag, compile_function)
Point = namedtuple('Point', ('a', 'b'))
def _build_set_literal_usecase(code, args):
code = code % {'initializer': ', '.join(repr(arg) for arg in args)}
return compile_function('build_set', code, globals())
def set_literal_return_usecase(args):
code = """if 1:
def build_set():
return {%(initializer)s}
"""
return _build_set_literal_usecase(code, args)
def set_literal_convert_usecase(args):
code = """if 1:
def build_set():
my_set = {%(initializer)s}
return list(my_set)
"""
return _build_set_literal_usecase(code, args)
def empty_constructor_usecase():
s = set()
s.add(1)
return len(s)
def constructor_usecase(arg):
s = set(arg)
return len(s)
def iterator_usecase(arg):
s = set(arg)
l = []
for v in s:
l.append(v)
return l
def update_usecase(a, b, c):
s = set()
s.update(a)
s.update(b)
s.update(c)
return list(s)
def bool_usecase(arg):
# Remove one element to allow for empty sets.
s = set(arg[1:])
return bool(s)
def remove_usecase(a, b):
s = set(a)
for v in b:
s.remove(v)
return list(s)
def discard_usecase(a, b):
s = set(a)
for v in b:
s.discard(v)
return list(s)
def add_discard_usecase(a, u, v):
s = set(a)
for i in range(1000):
s.add(u)
s.discard(v)
return list(s)
def pop_usecase(a):
s = set(a)
l = []
while len(s) > 0:
l.append(s.pop())
return l
def contains_usecase(a, b):
s = set(a)
l = []
for v in b:
l.append(v in s)
return l
def difference_update_usecase(a, b):
s = set(a)
s.difference_update(set(b))
return list(s)
def intersection_update_usecase(a, b):
s = set(a)
s.intersection_update(set(b))
return list(s)
def symmetric_difference_update_usecase(a, b):
s = set(a)
s.symmetric_difference_update(set(b))
return list(s)
def isdisjoint_usecase(a, b):
return set(a).isdisjoint(set(b))
def issubset_usecase(a, b):
return set(a).issubset(set(b))
def issuperset_usecase(a, b):
return set(a).issuperset(set(b))
def clear_usecase(a):
s = set(a)
s.clear()
return len(s), list(s)
def copy_usecase(a):
s = set(a)
ss = s.copy()
s.pop()
return len(ss), list(ss)
def copy_usecase_empty(a):
s = set(a)
s.clear()
ss = s.copy()
s.add(42)
return len(ss), list(ss)
def copy_usecase_deleted(a, b):
s = set(a)
s.remove(b)
ss = s.copy()
s.pop()
return len(ss), list(ss)
def difference_usecase(a, b):
sa = set(a)
s = sa.difference(set(b))
return list(s)
def intersection_usecase(a, b):
sa = set(a)
s = sa.intersection(set(b))
return list(s)
def symmetric_difference_usecase(a, b):
sa = set(a)
s = sa.symmetric_difference(set(b))
return list(s)
def union_usecase(a, b):
sa = set(a)
s = sa.union(set(b))
return list(s)
def set_return_usecase(a):
s = set(a)
return s
def make_operator_usecase(op):
code = """if 1:
def operator_usecase(a, b):
s = set(a) %(op)s set(b)
return list(s)
""" % dict(op=op)
return compile_function('operator_usecase', code, globals())
def make_inplace_operator_usecase(op):
code = """if 1:
def inplace_operator_usecase(a, b):
sa = set(a)
sb = set(b)
sc = sa
sc %(op)s sb
return list(sc), list(sa)
""" % dict(op=op)
return compile_function('inplace_operator_usecase', code, globals())
def make_comparison_usecase(op):
code = """if 1:
def comparison_usecase(a, b):
return set(a) %(op)s set(b)
""" % dict(op=op)
return compile_function('comparison_usecase', code, globals())
def noop(x):
pass
def unbox_usecase(x):
"""
Expect a set of numbers
"""
res = 0
for v in x:
res += v
return res
def unbox_usecase2(x):
"""
Expect a set of tuples
"""
res = 0
for v in x:
res += len(v)
return res
def unbox_usecase3(x):
"""
Expect a (number, set of numbers) tuple.
"""
a, b = x
res = a
for v in b:
res += v
return res
def unbox_usecase4(x):
"""
Expect a (number, set of tuples) tuple.
"""
a, b = x
res = a
for v in b:
res += len(v)
return res
def reflect_simple(sa, sb):
sa.add(42)
sa.update(sb)
return sa, len(sa), len(sb)
def reflect_conditional(sa, sb):
# `sa` may or may not actually reflect a Python set
if len(sb) > 1:
sa = set((11., 22., 33., 44.))
sa.add(42.)
sa.update(sb)
# Combine with a non-reflected set (to check method typing)
sc = set((55., 66.))
sa.symmetric_difference_update(sc)
return sa, len(sa), len(sb)
def reflect_exception(s):
s.add(42)
raise ZeroDivisionError
def reflect_dual(sa, sb):
sa.add(sb.pop())
return sa is sb
def unique_usecase(src):
seen = set()
res = []
for v in src:
if v not in seen:
seen.add(v)
res.append(v)
return res
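# Hedged illustration, not one of the test usecases: the functions above are compiled
# in nopython mode by the tests below; calling one directly would look like this
# (reflected list arguments were still supported in this era of Numba).
def _example_compiled_usecase():
    compiled_unique = jit(nopython=True)(unique_usecase)
    # behaves like the pure Python version: [1, 2, 2, 3, 1] -> [1, 2, 3]
    return compiled_unique([1, 2, 2, 3, 1])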
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
self.rnd = random.Random(42)
def _range(self, stop):
return np.arange(int(stop))
def _random_choice(self, seq, n):
"""
Choose *n* possibly duplicate items from sequence.
"""
l = [self.rnd.choice(list(seq)) for i in range(n)]
if isinstance(seq, np.ndarray):
return np.array(l, dtype=seq.dtype)
else:
return l
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = self._range(np.sqrt(n))
return self._random_choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = self._range(n ** 1.3)
return self._random_choice(a, n)
def _assert_equal_unordered(self, a, b):
if isinstance(a, tuple):
self.assertIsInstance(b, tuple)
for u, v in zip(a, b):
self._assert_equal_unordered(u, v)
elif isinstance(a, list):
self.assertIsInstance(b, list)
self.assertPreciseEqual(sorted(a), sorted(b))
else:
self.assertPreciseEqual(a, b)
def unordered_checker(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(*args):
expected = pyfunc(*args)
got = cfunc(*args)
self._assert_equal_unordered(expected, got)
return check
class TestSetLiterals(BaseTest):
def test_build_set(self, flags=enable_pyobj_flags):
pyfunc = set_literal_return_usecase((1, 2, 3, 2))
self.run_nullary_func(pyfunc, flags=flags)
def test_build_heterogeneous_set(self, flags=enable_pyobj_flags):
pyfunc = set_literal_return_usecase((1, 2.0, 3j, 2))
self.run_nullary_func(pyfunc, flags=flags)
pyfunc = set_literal_return_usecase((2.0, 2))
got, expected = self.run_nullary_func(pyfunc, flags=flags)
self.assertIs(type(got.pop()), type(expected.pop()))
def test_build_set_nopython(self):
arg = list(self.sparse_array(50))
pyfunc = set_literal_convert_usecase(arg)
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(sorted(expected), sorted(got))
class TestSets(BaseTest):
def test_constructor(self):
pyfunc = empty_constructor_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
pyfunc = constructor_usecase
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
self.assertPreciseEqual(pyfunc(arg), cfunc(arg))
check((1, 2, 3, 2, 7))
check(self.duplicates_array(200))
check(self.sparse_array(200))
def test_set_return(self):
pyfunc = set_return_usecase
cfunc = jit(nopython=True)(pyfunc)
arg = (1, 2, 3, 2, 7)
self.assertEqual(cfunc(arg), set(arg))
def test_iterator(self):
pyfunc = iterator_usecase
check = self.unordered_checker(pyfunc)
check((1, 2, 3, 2, 7))
check(self.duplicates_array(200))
check(self.sparse_array(200))
def test_update(self):
pyfunc = update_usecase
check = self.unordered_checker(pyfunc)
a, b, c = (1, 2, 4, 9), (2, 3, 5, 11, 42), (4, 5, 6, 42)
check(a, b, c)
a = self.sparse_array(50)
b = self.duplicates_array(50)
c = self.sparse_array(50)
check(a, b, c)
def test_bool(self):
pyfunc = bool_usecase
check = self.unordered_checker(pyfunc)
check([1])
check([1, 2])
check([False, False])
check([True, False])
def test_remove(self):
pyfunc = remove_usecase
check = self.unordered_checker(pyfunc)
a = (1, 2, 3, 5, 8, 42)
b = (5, 2, 8)
check(a, b)
def test_remove_error(self):
# References are leaked on exception
self.disable_leak_check()
pyfunc = remove_usecase
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(KeyError) as raises:
cfunc((1, 2, 3), (5, ))
def test_refcounted_types_forbidden(self):
# References are leaked on exception
self.disable_leak_check()
pyfunc = constructor_usecase
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(errors.LoweringError) as raises:
cfunc("abc")
excstr = str(raises.exception)
self.assertIn("Use of reference counted items in 'set()'", excstr)
self.assertIn("offending type is: 'unicode_type'", excstr)
def test_discard(self):
pyfunc = discard_usecase
check = self.unordered_checker(pyfunc)
a = (1, 2, 3, 5, 8, 42)
b = (5, 2, 8)
check(a, b)
a = self.sparse_array(50)
b = self.sparse_array(50)
check(a, b)
def test_add_discard(self):
"""
Check that the insertion logic does not create an infinite lookup
chain with deleted entries (insertion should happen at the first
deleted entry, not at the free entry at the end of the chain).
See issue #1913.
"""
pyfunc = add_discard_usecase
check = self.unordered_checker(pyfunc)
check((1,), 5, 5)
def test_pop(self):
pyfunc = pop_usecase
check = self.unordered_checker(pyfunc)
check((2, 3, 55, 11, 8, 42))
check(self.sparse_array(50))
def test_contains(self):
pyfunc = contains_usecase
cfunc = jit(nopython=True)(pyfunc)
def check(a, b):
self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b))
a = (1, 2, 3, 5, 42)
b = (5, 2, 8, 3)
check(a, b)
def _test_xxx_update(self, pyfunc):
check = self.unordered_checker(pyfunc)
a, b = (1, 2, 4, 11), (2, 3, 5, 11, 42)
check(a, b)
sizes = (0, 50, 500)
for na, nb in itertools.product(sizes, sizes):
a = self.sparse_array(na)
b = self.sparse_array(nb)
check(a, b)
def test_difference_update(self):
self._test_xxx_update(difference_update_usecase)
def test_intersection_update(self):
self._test_xxx_update(intersection_update_usecase)
def test_symmetric_difference_update(self):
self._test_xxx_update(symmetric_difference_update_usecase)
def _test_comparator(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(a, b):
self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b))
a, b = map(set, [(1, 2, 4, 11), (2, 3, 5, 11, 42)])
args = [a & b, a - b, a | b, a ^ b]
args = [tuple(x) for x in args]
for a, b in itertools.product(args, args):
check(a, b)
def test_isdisjoint(self):
self._test_comparator(isdisjoint_usecase)
def test_issubset(self):
self._test_comparator(issubset_usecase)
def test_issuperset(self):
self._test_comparator(issuperset_usecase)
def test_clear(self):
pyfunc = clear_usecase
check = self.unordered_checker(pyfunc)
check((1, 2, 4, 11))
check(self.sparse_array(50))
def test_copy(self):
# Source set doesn't have any deleted entries
pyfunc = copy_usecase
check = self.unordered_checker(pyfunc)
check((1, 2, 4, 11))
check(self.sparse_array(50))
pyfunc = copy_usecase_empty
check = self.unordered_checker(pyfunc)
check((1,))
# Source set has deleted entries
pyfunc = copy_usecase_deleted
check = self.unordered_checker(pyfunc)
check((1, 2, 4, 11), 2)
a = self.sparse_array(50)
check(a, a[len(a) // 2])
def _test_set_operator(self, pyfunc):
check = self.unordered_checker(pyfunc)
a, b = (1, 2, 4, 11), (2, 3, 5, 11, 42)
check(a, b)
sizes = (0, 50, 500)
for na, nb in itertools.product(sizes, sizes):
a = self.sparse_array(na)
b = self.sparse_array(nb)
check(a, b)
def test_difference(self):
self._test_set_operator(difference_usecase)
def test_intersection(self):
self._test_set_operator(intersection_usecase)
def test_symmetric_difference(self):
self._test_set_operator(symmetric_difference_usecase)
def test_union(self):
self._test_set_operator(union_usecase)
def test_and(self):
self._test_set_operator(make_operator_usecase('&'))
def test_or(self):
self._test_set_operator(make_operator_usecase('|'))
def test_sub(self):
self._test_set_operator(make_operator_usecase('-'))
def test_xor(self):
self._test_set_operator(make_operator_usecase('^'))
def test_eq(self):
self._test_set_operator(make_comparison_usecase('=='))
def test_ne(self):
self._test_set_operator(make_comparison_usecase('!='))
def test_le(self):
self._test_set_operator(make_comparison_usecase('<='))
def test_lt(self):
self._test_set_operator(make_comparison_usecase('<'))
def test_ge(self):
self._test_set_operator(make_comparison_usecase('>='))
def test_gt(self):
self._test_set_operator(make_comparison_usecase('>'))
def test_iand(self):
self._test_set_operator(make_inplace_operator_usecase('&='))
def test_ior(self):
self._test_set_operator(make_inplace_operator_usecase('|='))
def test_isub(self):
self._test_set_operator(make_inplace_operator_usecase('-='))
def test_ixor(self):
self._test_set_operator(make_inplace_operator_usecase('^='))
class OtherTypesTest(object):
def test_constructor(self):
pyfunc = empty_constructor_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
pyfunc = constructor_usecase
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
self.assertPreciseEqual(pyfunc(arg), cfunc(arg))
check(self.duplicates_array(200))
check(self.sparse_array(200))
def test_iterator(self):
pyfunc = iterator_usecase
check = self.unordered_checker(pyfunc)
check(self.duplicates_array(200))
check(self.sparse_array(200))
def test_update(self):
pyfunc = update_usecase
check = self.unordered_checker(pyfunc)
a = self.sparse_array(50)
b = self.duplicates_array(50)
c = self.sparse_array(50)
check(a, b, c)
class TestFloatSets(OtherTypesTest, BaseTest):
"""
Test sets with floating-point keys.
"""
# Only a few basic tests here, as the sanity of most operations doesn't
# depend on the key type.
def _range(self, stop):
return np.arange(stop, dtype=np.float32) * np.float32(0.1)
class TestTupleSets(OtherTypesTest, BaseTest):
"""
Test sets with tuple keys.
"""
def _range(self, stop):
a = np.arange(stop, dtype=np.int64)
b = a & 0x5555555555555555
c = (a & 0xaaaaaaaa).astype(np.int32)
d = ((a >> 32) & 1).astype(np.bool_)
return list(zip(b, c, d))
class TestUnboxing(BaseTest):
"""
Test unboxing of Python sets into native Numba sets.
"""
@contextlib.contextmanager
def assert_type_error(self, msg):
with self.assertRaises(TypeError) as raises:
yield
if msg is not None:
self.assertRegexpMatches(str(raises.exception), msg)
def check_unary(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
expected = pyfunc(arg)
got = cfunc(arg)
self.assertPreciseEqual(got, expected)
return check
def test_numbers(self):
check = self.check_unary(unbox_usecase)
check(set([1, 2]))
check(set([1j, 2.5j]))
# Check allocation and sizing
check(set(range(100)))
def test_tuples(self):
check = self.check_unary(unbox_usecase2)
check(set([(1, 2), (3, 4)]))
check(set([(1, 2j), (3, 4j)]))
def test_set_inside_tuple(self):
check = self.check_unary(unbox_usecase3)
check((1, set([2, 3, 4])))
def test_set_of_tuples_inside_tuple(self):
check = self.check_unary(unbox_usecase4)
check((1, set([(2,), (3,)])))
def test_errors(self):
# Error checking should ensure the set is homogeneous
msg = "can't unbox heterogeneous set"
pyfunc = noop
cfunc = jit(nopython=True)(pyfunc)
val = set([1, 2.5])
with self.assert_type_error(msg):
cfunc(val)
# The set hasn't been changed (bogus reflecting)
self.assertEqual(val, set([1, 2.5]))
with self.assert_type_error(msg):
cfunc(set([1, 2j]))
# Same when the set is nested in a tuple or namedtuple
with self.assert_type_error(msg):
cfunc((1, set([1, 2j])))
with self.assert_type_error(msg):
cfunc(Point(1, set([1, 2j])))
# Tuples of different size.
# Note the check is really on the tuple side.
lst = set([(1,), (2, 3)])
# Depending on which tuple is examined first, we could get
# an IndexError or a ValueError.
with self.assertRaises((IndexError, ValueError)) as raises:
cfunc(lst)
class TestSetReflection(BaseTest):
"""
Test reflection of native Numba sets on Python set objects.
"""
def check_reflection(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
samples = [(set([1., 2., 3., 4.]), set([0.])),
(set([1., 2., 3., 4.]), set([5., 6., 7., 8., 9.])),
]
for dest, src in samples:
expected = set(dest)
got = set(dest)
pyres = pyfunc(expected, src)
with self.assertRefCount(got, src):
cres = cfunc(got, src)
self.assertPreciseEqual(cres, pyres)
self.assertPreciseEqual(expected, got)
self.assertEqual(pyres[0] is expected, cres[0] is got)
del pyres, cres
def test_reflect_simple(self):
self.check_reflection(reflect_simple)
def test_reflect_conditional(self):
self.check_reflection(reflect_conditional)
def test_reflect_exception(self):
"""
When the function exits with an exception, sets should still be
reflected.
"""
pyfunc = reflect_exception
cfunc = jit(nopython=True)(pyfunc)
s = set([1, 2, 3])
with self.assertRefCount(s):
with self.assertRaises(ZeroDivisionError):
cfunc(s)
self.assertPreciseEqual(s, set([1, 2, 3, 42]))
def test_reflect_same_set(self):
"""
When the same set object is reflected twice, behaviour should
be consistent.
"""
pyfunc = reflect_dual
cfunc = jit(nopython=True)(pyfunc)
pyset = set([1, 2, 3])
cset = pyset.copy()
expected = pyfunc(pyset, pyset)
got = cfunc(cset, cset)
self.assertPreciseEqual(expected, got)
self.assertPreciseEqual(pyset, cset)
self.assertPreciseEqual(sys.getrefcount(pyset), sys.getrefcount(cset))
def test_reflect_clean(self):
"""
When the set wasn't mutated, no reflection should take place.
"""
cfunc = jit(nopython=True)(noop)
# Use a complex, as Python integers can be cached
s = set([12.5j])
ids = [id(x) for x in s]
cfunc(s)
self.assertEqual([id(x) for x in s], ids)
class TestExamples(BaseTest):
"""
Examples of using sets.
"""
def test_unique(self):
pyfunc = unique_usecase
check = self.unordered_checker(pyfunc)
check(self.duplicates_array(200))
check(self.sparse_array(200))
if __name__ == '__main__':
unittest.main()
|
rodxavier/open-pse-initiative
|
refs/heads/master
|
django_project/api/renderers/csv_renderers.py
|
1
|
from rest_framework_csv.renderers import CSVRenderer
from api.serializers import QuoteSerializer
class QuoteCSVRenderer(CSVRenderer):
headers = QuoteSerializer().get_fields()
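# Hedged usage sketch, not part of this project: the renderer plugs into a view
# through DRF's standard ``renderer_classes`` hook; the view name and import below
# are illustrative assumptions only.
#
#     from rest_framework import generics
#
#     class QuoteCSVListView(generics.ListAPIView):
#         serializer_class = QuoteSerializer
#         renderer_classes = (QuoteCSVRenderer, )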
|
yceruto/django
|
refs/heads/master
|
tests/schema/tests.py
|
2
|
from __future__ import absolute_import
import datetime
import unittest
from django.test import TransactionTestCase
from django.db import connection, DatabaseError, IntegrityError
from django.db.models.fields import IntegerField, TextField, CharField, SlugField
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.db.transaction import atomic
from .models import (Author, AuthorWithM2M, Book, BookWithLongName,
BookWithSlug, BookWithM2M, Tag, TagIndexed, TagM2MTest, TagUniqueRename,
UniqueTest)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithM2M, Book, BookWithLongName, BookWithSlug,
BookWithM2M, Tag, TagIndexed, TagM2MTest, TagUniqueRename, UniqueTest,
]
# Utility functions
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
cursor = connection.cursor()
connection.disable_constraint_checking()
table_names = connection.introspection.table_names(cursor)
for model in self.models:
# Remove any M2M tables first
for field in model._meta.local_many_to_many:
with atomic():
tbl = field.rel.through._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
# Then remove the main tables
with atomic():
tbl = model._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
cursor = connection.cursor()
columns = dict(
(d[0], (connection.introspection.get_field_type(d[1], d), d))
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
)
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
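    # Hedged illustration, not part of Django's test suite: for the Author model the
    # helper above returns a mapping shaped roughly like
    #     {'id': ('AutoField', <description row>), 'name': ('CharField', <description row>)}
    # where the description row itself is backend-specific.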
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
new_field = ForeignKey(Tag)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(
Book,
Book._meta.get_field_by_name("author")[0],
new_field,
strict=True,
)
# Make sure the new FK constraint is present
constraints = connection.introspection.get_constraints(connection.cursor(), Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add an age field (a nullable IntegerField)
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("name")[0],
new_field,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
new_field,
new_field2,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), False)
def test_rename(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("name")[0],
new_field,
strict=True,
)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
def test_m2m_create(self):
"""
Tests M2M fields on models during creation
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(BookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(BookWithM2M._meta.get_field_by_name("tags")[0].rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m(self):
"""
Tests adding/removing M2M fields on models
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = ManyToManyField("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(AuthorWithM2M, "tags")
try:
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(
Author,
new_field,
)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.rel.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(
Author,
new_field,
)
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.rel.through)
finally:
# Cleanup model states
AuthorWithM2M._meta.local_many_to_many.remove(new_field)
def test_m2m_repoint(self):
"""
Tests repointing M2M fields
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
constraints = connection.introspection.get_constraints(connection.cursor(), BookWithM2M._meta.get_field_by_name("tags")[0].rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
break
else:
self.fail("No FK constraint for tagm2mtest_id found")
# Repoint the M2M
new_field = ManyToManyField(UniqueTest)
new_field.contribute_to_class(BookWithM2M, "uniques")
try:
with connection.schema_editor() as editor:
editor.alter_field(
Author,
BookWithM2M._meta.get_field_by_name("tags")[0],
new_field,
)
# Ensure old M2M is gone
self.assertRaises(DatabaseError, self.column_classes, BookWithM2M._meta.get_field_by_name("tags")[0].rel.through)
# Ensure the new M2M exists and points to UniqueTest
constraints = connection.introspection.get_constraints(connection.cursor(), new_field.rel.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
break
else:
self.fail("No FK constraint for uniquetest_id found")
finally:
# Cleanup through table separately
with connection.schema_editor() as editor:
editor.remove_field(BookWithM2M, BookWithM2M._meta.get_field_by_name("uniques")[0])
# Cleanup model states
BookWithM2M._meta.local_many_to_many.remove(new_field)
del BookWithM2M._meta._m2m_cache
@unittest.skipUnless(connection.features.supports_check_constraints, "No check constraints")
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = connection.introspection.get_constraints(connection.cursor(), Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(
Author,
Author._meta.get_field_by_name("height")[0],
new_field,
strict=True,
)
constraints = connection.introspection.get_constraints(connection.cursor(), Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
with connection.schema_editor() as editor:
editor.alter_field(
Author,
new_field,
Author._meta.get_field_by_name("height")[0],
strict=True,
)
constraints = connection.introspection.get_constraints(connection.cursor(), Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
new_field,
strict=True,
)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
new_field,
new_new_field,
strict=True,
)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
TagUniqueRename._meta.get_field_by_name("slug2")[0],
strict=True,
)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest,
UniqueTest._meta.unique_together,
[],
)
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(
UniqueTest,
[],
UniqueTest._meta.unique_together,
)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the slug/title columns first
self.assertEqual(
False,
any(
c["index"]
for c in connection.introspection.get_constraints(connection.cursor(), "schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(
Tag,
[],
[("slug", "title")],
)
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in connection.introspection.get_constraints(connection.cursor(), "schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_new_field = SlugField(unique=True)
new_new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(
Tag,
[("slug", "title")],
[],
)
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in connection.introspection.get_constraints(connection.cursor(), "schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in connection.introspection.get_constraints(connection.cursor(), "schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(
Author,
"schema_author",
"schema_otherauthor",
)
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(
Author,
"schema_otherauthor",
"schema_author",
)
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
connection.introspection.get_indexes(connection.cursor(), Book._meta.db_table),
)
# Alter to remove the index
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(
Book,
Book._meta.get_field_by_name("title")[0],
new_field,
strict=True,
)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
connection.introspection.get_indexes(connection.cursor(), Book._meta.db_table),
)
# Alter to re-add the index
with connection.schema_editor() as editor:
editor.alter_field(
Book,
new_field,
Book._meta.get_field_by_name("title")[0],
strict=True,
)
# Ensure the table is there and has the index again
self.assertIn(
"title",
connection.introspection.get_indexes(connection.cursor(), Book._meta.db_table),
)
# Add a unique column, verify that it creates an implicit index
with connection.schema_editor() as editor:
editor.add_field(
Book,
BookWithSlug._meta.get_field_by_name("slug")[0],
)
self.assertIn(
"slug",
connection.introspection.get_indexes(connection.cursor(), Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field2 = CharField(max_length=20, unique=False)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(
BookWithSlug,
BookWithSlug._meta.get_field_by_name("slug")[0],
new_field2,
strict=True,
)
self.assertNotIn(
"slug",
connection.introspection.get_indexes(connection.cursor(), Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertTrue(
connection.introspection.get_indexes(connection.cursor(), Tag._meta.db_table)['id']['primary_key'],
)
# Alter to change the PK
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.remove_field(Tag, Tag._meta.get_field_by_name("id")[0])
editor.alter_field(
Tag,
Tag._meta.get_field_by_name("slug")[0],
new_field,
)
# Ensure the PK changed
self.assertNotIn(
'id',
connection.introspection.get_indexes(connection.cursor(), Tag._meta.db_table),
)
self.assertTrue(
connection.introspection.get_indexes(connection.cursor(), Tag._meta.db_table)['slug']['primary_key'],
)
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497. Only affects databases that support
foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
connection.introspection.get_indexes(connection.cursor(), BookWithLongName._meta.db_table),
)
|
ITCase/sacrud_deform
|
refs/heads/master
|
setup.py
|
2
|
import os
from setuptools import setup
here = os.path.dirname(os.path.realpath(__file__))
def read(name):
with open(os.path.join(here, name)) as f:
return f.read()
setup(
name='sacrud_deform',
version="0.1.6",
url='http://github.com/sacrud/sacrud_deform/',
author='Svintsov Dmitry',
author_email='root@uralbash.ru',
packages=['sacrud_deform', ],
include_package_data=True,
zip_safe=False,
test_suite="nose.collector",
license="MIT",
package_dir={'sacrud_deform': 'sacrud_deform'},
description='Form generator for SQLAlchemy models.',
long_description=read('README.rst'),
install_requires=read('requirements.txt'),
tests_require=read('requirements.txt') + read('requirements-test.txt'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Framework :: Pyramid ",
"Topic :: Internet",
"Topic :: Database",
],
)
|
wangxuan007/flasky
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py
|
20
|
# sql/compiler.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]*)(?![:\w\$])', re.UNICODE)
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
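# Illustrative note, not part of SQLAlchemy: for a bind parameter named "ident" the
# templates above render roughly as follows, depending on the dialect's paramstyle:
#     pyformat -> "%(ident)s"
#     qmark    -> "?"
#     format   -> "%s"
#     numeric  -> ":[_POSITION]"  (numbered later by SQLCompiler._apply_numbered_params)
#     named    -> ":ident"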
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.concat_op: ' || ',
operators.match_op: ' MATCH ',
operators.notmatch_op: ' NOT MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER'
}
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
def __init__(self, dialect, statement, bind=None,
compile_kwargs=util.immutabledict()):
"""Construct a new ``Compiled`` object.
:param dialect: ``Dialect`` to compile against.
:param statement: ``ClauseElement`` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
.. versionadded:: 0.8
"""
self.dialect = dialect
self.bind = bind
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_compiled(self, multiparams, params)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
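# Hedged illustration, not part of SQLAlchemy: user code usually reaches a
# ``Compiled`` object through ``ClauseElement.compile()`` rather than constructing
# one directly, e.g.
#
#     compiled = select([users.c.name]).where(users.c.id == 5).compile(dialect=some_dialect)
#     str(compiled)       # the rendered SQL text
#     compiled.params     # the bind parameter dictionary
#
# ``users``, ``select`` and ``some_dialect`` are assumed to exist in the caller's scope.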
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = 'visit_\w+'
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
class SQLCompiler(Compiled):
"""Default implementation of Compiled.
Compiles ClauseElements into SQL strings. Uses a similar visit
paradigm as visitors.ClauseVisitor but implements its own traversal.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to True if the target
driver/DB enforces this
"""
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new ``DefaultCompiler`` object.
dialect
Dialect to be used
statement
ClauseElement to be compiled
column_keys
a list of column names to be compiled into an INSERT or UPDATE
statement.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# if False, means we can't be sure the list of entries
# in _result_columns is actually the rendered order. This
# gets flipped when we use TextAsFrom, for example.
self._ordered_columns = True
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
# an IdentifierPreparer that formats the quoting of identifiers
self.preparer = dialect.identifier_preparer
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if self.positional and dialect.paramstyle == 'numeric':
self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns, self._ordered_columns)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry['need_result_map_for_nested'] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop('need_result_map_for_nested')
self._result_columns, self._ordered_columns = (
result_columns, ordered_columns)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect))
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]['selectable']
with_cols, only_froms = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = with_cols
# this can be None in the case that a _label_reference()
# was subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if order_by_elem is not None and order_by_elem.name in \
resolve_dict:
kwargs['render_label_as_label'] = \
element.element._order_by_label_element
return self.process(
element.element, within_columns_clause=within_columns_clause,
**kwargs)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(
element._text_clause
)
selectable = self.stack[-1]['selectable']
with_cols, only_froms = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError:
# treat it like text()
util.warn_limited(
"Can't resolve label reference %r; converting to text()",
util.ellipses_string(element.element))
return self.process(
element._text_clause
)
else:
kwargs['render_label_as_label'] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs)
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (within_columns_clause and not
within_label_clause)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(
self, within_columns_clause=True,
within_label_clause=True, **kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw)
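# Hedged illustration of the branches above: inside a SELECT's columns clause a
# Label such as users.c.name.label("uname") renders as "users.name AS uname";
# when referenced from ORDER BY with render_label_as_label set, only the label
# name "uname" is emitted; in any other position the wrapped element is rendered
# without AS. The column and label names here are hypothetical.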
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
if table.schema:
schema_prefix = self.preparer.quote_schema(table.schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
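# Sketch of the intent above (an interpretation, not an authoritative statement):
# with a DBAPI that uses a %-based paramstyle, literal_column("x % 2") would
# collide with the driver's own substitution, so '%' is doubled to '%%' here
# before the driver interpolates bound parameters.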
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam,
self.post_process_text(textclause.text))
)
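# Hedged example of the behavior above: in text("name = :name"), ":name" is
# replaced by do_bindparam() with either the pre-declared bindparam or a plain
# placeholder string, while an escaped "\:name" is un-escaped and emitted
# literally as ":name" without creating a bind parameter.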
def visit_text_as_from(self, taf,
compound_index=None,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
if populate_result_map:
self._ordered_columns = False
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or \
(compound_index == 0
and entry.get('need_result_map_for_compound', False))
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'asfrom_froms': entry['asfrom_froms'],
'selectable': cs,
'need_result_map_for_compound': need_result_map
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit_clause is not None
or cs._offset_clause is not None) and \
self.limit_clause(cs, **kwargs) or ""
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = getattr(self, "visit_%s_unary_operator" %
unary.operator.__name__, None)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = getattr(self, "visit_%s_unary_modifier" %
unary.modifier.__name__, None)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
def visit_binary(self, binary, override_operator=None, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator_ = override_operator or binary.operator
disp = getattr(self, "visit_%s_binary" % operator_.__name__, None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator_)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(self, binary, opstring, **kw):
return binary.left._compiler_dispatch(self, **kw) + \
opstring + \
binary.right._compiler_dispatch(self, **kw)
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
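# Rough summary of how the handlers above compose (an interpretation of the
# code, not a quote): contains() wraps the operand as '%' || operand || '%',
# startswith() appends a trailing '%', and endswith() prepends a leading '%',
# each then delegating to visit_like_op_binary()/visit_notlike_op_binary()
# below; the concatenation operator actually rendered is dialect-dependent.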
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')." %
(bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
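# Usage sketch (assumes the public compile API): compiling a statement with
# stmt.compile(compile_kwargs={"literal_binds": True}) routes bind values
# through render_literal_bindparam() above and render_literal_value() below,
# so values are inlined as quoted SQL literals by the type's literal processor
# instead of being emitted as placeholders.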
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
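# Hedged illustration: when an anonymized name exceeds label_length - 6
# characters it is truncated and suffixed with "_" plus a hex counter, so two
# long auto-generated labels in one statement might come out as, say,
# "some_overly_long_lab_1" and "some_overly_long_lab_2" (names hypothetical).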
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte, **kwargs)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier(
"alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
if self.positional:
kwargs['positional_names'] = self.cte_positional[cte] = []
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += self.get_render_as_alias_suffix(cte_name)
else:
return self.preparer.format_alias(cte, cte_name)
return text
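# Rough sketch of the output shape (inferred from the code above, not an
# excerpt): for cte = select([orders]).cte("regional_sales") referenced by an
# enclosing SELECT, this method registers text roughly like
#   regional_sales AS (SELECT ... FROM orders)
# in self.ctes, while the point of use renders only the alias name;
# _render_cte_clause() later prepends the WITH preamble to the statement.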
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name))
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
objects + (column,), type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column, elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(SQLite being the main one).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == \
'select_container'
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict([
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and 'add_to_result_map' in kwargs:
del kwargs['add_to_result_map']
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
froms = self._setup_select_stack(select, entry, asfrom)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(
select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
wrapped_inner_columns = set(select_wraps_for.inner_columns)
translate = dict(
(outer, inner.pop()) for outer, inner in [
(
outer,
outer.proxy_set.intersection(wrapped_inner_columns))
for outer in select.inner_columns
] if inner
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs)
if select._statement_hints:
per_dialect = [
ht for (dialect_name, ht)
in select._statement_hints
if dialect_name in ('*', self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and self._is_toplevel_select(select):
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs)
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _is_toplevel_select(self, select):
"""Return True if the stack is placed at the given select, and
is also the outermost SELECT, meaning there is either no stack
before this one, or the enclosing stack is a topmost INSERT.
"""
return (
self.stack[-1]['selectable'] is select and
(
len(self.stack) == 1 or self.isinsert and len(self.stack) == 2
and self.statement is self.stack[0]['selectable']
)
)
def _setup_select_hints(self, select):
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom):
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'correlate_froms': all_correlate_froms,
'selectable': select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs):
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (select._limit_clause is not None or
select._offset_clause is not None):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement; the position is just
before the column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
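# Note on the generic form above (individual dialects override this): when an
# OFFSET is present without a LIMIT, "LIMIT -1" is emitted as a filler so that
# backends whose grammar requires LIMIT before OFFSET still receive a
# parseable clause.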
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
if use_schema and getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def visit_insert(self, insert_stmt, **kw):
self.stack.append(
{'correlate_froms': set(),
"asfrom_froms": set(),
"selectable": insert_stmt})
self.isinsert = True
crud_params = crud._get_crud_params(self, insert_stmt, **kw)
if not crud_params and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
insert_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if insert_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
insert_stmt.table,
dialect_hints[insert_stmt.table],
True
)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in crud_params_single])
if self.returning or insert_stmt._returning:
self.returning = self.returning or insert_stmt._returning
returning_clause = self.returning_clause(
insert_stmt, self.returning)
if self.returning_precedes_values:
text += " " + returning_clause
if insert_stmt.select is not None:
text += " %s" % self.process(self._insert_from_select, **kw)
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in crud_param_set)
)
for crud_param_set in crud_params
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in crud_params])
if self.returning and not self.returning_precedes_values:
text += " " + returning_clause
self.stack.pop(-1)
return text
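# Illustrative shape of the statement assembled above (table and column names
# are hypothetical): INSERT INTO users (id, name) VALUES (:id, :name); with no
# parameters and dialect support, " DEFAULT VALUES" is used instead, and
# insert().values([...]) with multiple dictionaries yields several
# parenthesized VALUES groups when the dialect supports multi-row inserts.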
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE statement."""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
return from_table._compiler_dispatch(self, asfrom=True,
iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def visit_update(self, update_stmt, **kw):
self.stack.append(
{'correlate_froms': set([update_stmt.table]),
"asfrom_froms": set([update_stmt.table]),
"selectable": update_stmt})
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
crud_params = crud._get_crud_params(self, update_stmt, **kw)
if update_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
update_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if update_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
update_stmt.table,
dialect_hints[update_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in crud_params
)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
self.stack.pop(-1)
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self)
def visit_delete(self, delete_stmt, **kw):
self.stack.append({'correlate_froms': set([delete_stmt.table]),
"asfrom_froms": set([delete_stmt.table]),
"selectable": delete_stmt})
self.isdelete = True
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = delete_stmt.table._compiler_dispatch(
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
delete_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if delete_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
delete_stmt.table,
dialect_hints[delete_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
self.returning = delete_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
@property
def preparer(self):
return self.dialect.identifier_preparer
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.dialect.identifier_preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.dialect.identifier_preparer
text = "\n" + " ".join(['CREATE'] +
table._prefixes +
['TABLE',
preparer.format_table(table),
"("])
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s") %
(table.description, column.name, ce.args[0])
))
const = self.create_table_constraints(
table, _include_foreign_key_constraints=
create.include_foreign_key_constraints)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint)
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(
self, table,
_include_foreign_key_constraints=None):
# On some databases the order is significant: visit the PK first, then
# the other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key and
c not in omit_fkcs])
return ", \n\t".join(
p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if include_schema and index.table is not None and index.table.schema:
schema = index.table.schema
schema_name = self.preparer.quote_schema(schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cycle is not None:
text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return "'%s'" % column.server_default.arg
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % constraint.sqltext
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.dialect.identifier_preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint.elements),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint.elements)
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return 'TIMESTAMP'
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
return value.replace(self.escape_quote, self.escape_to_quote)
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
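# Hedged examples of identifiers this predicate would flag for quoting: reserved
# words ("select"), names starting with an illegal character ("1st_col"), names
# containing characters outside legal_characters ("user name"), and mixed-case
# names ("MixedCase"), since for those lc_value != value.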
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
The 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
The 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
if (not self.omit_schema and use_schema and
sequence.schema is not None):
name = self.quote_schema(sequence.schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
return self.quote(name or savepoint.ident)
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table)
if name:
return self.quote(name)
elif isinstance(constraint.name, elements._defer_none_name):
return None
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
if not self.omit_schema and use_schema \
and getattr(table, "schema", None):
result = self.quote_schema(table.schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
if not self.omit_schema and use_schema and \
getattr(table, 'schema', None):
return (self.quote_schema(table.schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
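        # Editor's hedged usage sketch (the identifier names and quoting results
        # below are illustrative, not taken from any particular dialect):
        #   preparer = dialect.identifier_preparer
        #   preparer.quote('order')      # reserved word     -> '"order"'
        #   preparer.quote('user_id')    # plain legal name  -> 'user_id'
        #   preparer.format_table(tbl)   # -> 'my_schema.my_table' (quoted only where required)
        #   preparer.unformat_identifiers('my_schema.my_table')  # -> ['my_schema', 'my_table']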
|
qma/pants
|
refs/heads/master
|
src/python/pants/cache/restful_artifact_cache.py
|
3
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import urlparse
import requests
from requests import RequestException
from pants.cache.artifact_cache import (ArtifactCache, ArtifactCacheError,
NonfatalArtifactCacheError, UnreadableArtifact)
logger = logging.getLogger(__name__)
# Reduce the somewhat verbose logging of requests.
# TODO do this in a central place
logging.getLogger('requests').setLevel(logging.WARNING)
class InvalidRESTfulCacheProtoError(ArtifactCacheError):
"""Indicates an invalid protocol used in a remote spec."""
pass
class RequestsSession(object):
_session = None
@classmethod
def instance(cls):
if cls._session is None:
cls._session = requests.Session()
return cls._session
class RESTfulArtifactCache(ArtifactCache):
"""An artifact cache that stores the artifacts on a RESTful service."""
READ_SIZE_BYTES = 4 * 1024 * 1024
def __init__(self, artifact_root, url_base, local):
"""
:param str artifact_root: The path under which cacheable products will be read/written.
:param str url_base: The prefix for urls on some RESTful service. We must be able to PUT and
GET to any path under this base.
:param BaseLocalArtifactCache local: local cache instance for storing and creating artifacts
"""
super(RESTfulArtifactCache, self).__init__(artifact_root)
parsed_url = urlparse.urlparse(url_base)
if parsed_url.scheme == 'http':
self._ssl = False
elif parsed_url.scheme == 'https':
self._ssl = True
else:
raise InvalidRESTfulCacheProtoError(
'RESTfulArtifactCache only supports HTTP(S). Found: {0}'.format(parsed_url.scheme))
self._timeout_secs = 4.0
self._netloc = parsed_url.netloc
self._path_prefix = parsed_url.path.rstrip(b'/')
self._localcache = local
def try_insert(self, cache_key, paths):
# Delegate creation of artifact to local cache.
with self._localcache.insert_paths(cache_key, paths) as tarfile:
# Upload local artifact to remote cache.
with open(tarfile, 'rb') as infile:
remote_path = self._remote_path_for_key(cache_key)
if not self._request('PUT', remote_path, body=infile):
url = self._url_string(remote_path)
raise NonfatalArtifactCacheError('Failed to PUT to {0}.'.format(url))
def has(self, cache_key):
if self._localcache.has(cache_key):
return True
return self._request('HEAD', self._remote_path_for_key(cache_key)) is not None
def use_cached_files(self, cache_key, results_dir=None):
if self._localcache.has(cache_key):
return self._localcache.use_cached_files(cache_key, results_dir)
remote_path = self._remote_path_for_key(cache_key)
try:
response = self._request('GET', remote_path)
if response is not None:
# Delegate storage and extraction to local cache
byte_iter = response.iter_content(self.READ_SIZE_BYTES)
return self._localcache.store_and_use_artifact(cache_key, byte_iter, results_dir)
except Exception as e:
logger.warn('\nError while reading from remote artifact cache: {0}\n'.format(e))
return UnreadableArtifact(cache_key, e)
return False
def delete(self, cache_key):
self._localcache.delete(cache_key)
remote_path = self._remote_path_for_key(cache_key)
self._request('DELETE', remote_path)
def _remote_path_for_key(self, cache_key):
return '{0}/{1}/{2}.tgz'.format(self._path_prefix, cache_key.id, cache_key.hash)
# Returns a response if we get a 200, None if we get a 404 and raises an exception otherwise.
def _request(self, method, path, body=None):
url = self._url_string(path)
logger.debug('Sending {0} request to {1}'.format(method, url))
session = RequestsSession.instance()
try:
response = None
if 'PUT' == method:
response = session.put(url, data=body, timeout=self._timeout_secs)
elif 'GET' == method:
response = session.get(url, timeout=self._timeout_secs, stream=True)
elif 'HEAD' == method:
response = session.head(url, timeout=self._timeout_secs)
elif 'DELETE' == method:
response = session.delete(url, timeout=self._timeout_secs)
else:
raise ValueError('Unknown request method {0}'.format(method))
# Allow all 2XX responses. E.g., nginx returns 201 on PUT. HEAD may return 204.
if int(response.status_code / 100) == 2:
return response
elif response.status_code == 404:
logger.debug('404 returned for {0} request to {1}'.format(method, self._url_string(path)))
return None
else:
raise NonfatalArtifactCacheError('Failed to {0} {1}. Error: {2} {3}'.format(method,
self._url_string(path),
response.status_code,
response.reason))
except RequestException as e:
raise NonfatalArtifactCacheError(e)
def _url_string(self, path):
proto = 'http'
if self._ssl:
proto = 'https'
return '{0}://{1}{2}'.format(proto, self._netloc, path)
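  # Editor's hedged sketch of how a cache key maps onto the remote service
  # (the base URL and cache-key fields are illustrative): with
  # url_base 'https://cache.example.com/prefix',
  #   _remote_path_for_key(key)  -> '/prefix/<key.id>/<key.hash>.tgz'
  #   _url_string(that path)     -> 'https://cache.example.com/prefix/<key.id>/<key.hash>.tgz'
  # try_insert PUTs this single .tgz artifact and use_cached_files streams it back with GET.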
|
mikrosimage/rez
|
refs/heads/20160619_master.mikros.1
|
src/rez/vendor/enum/__init__.py
|
33
|
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super(_EnumDict, self).__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If a descriptor is added with the same name as an enum member, the name
is removed from _member_names (this may leave a hole in the numerical
sequence of values).
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as an unnecessary piece
        left over from 2.x
"""
if pyver >= 3.0 and key == '__order__':
return
if _is_sunder(key):
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
__order__ = classdict.get('__order__')
if __order__ is None:
if pyver < 3.0:
__order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
else:
__order__ = classdict._member_names
else:
del classdict['__order__']
if pyver < 3.0:
__order__ = __order__.replace(',', ' ').split()
aliases = [name for name in members if name not in __order__]
__order__ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names), ))
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in __order__:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
        # method resolution and ints are not playing nicely
        # Python versions before 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __call__(cls, value, names=None, module=None, type=None):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __len__(cls):
return len(cls._member_names_)
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
def _create_(cls, class_name, names=None, module=None, type=None):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
"""
metacls = cls.__class__
if type is None:
bases = (cls, )
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
__order__ = []
# special processing needed for names?
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i+1) for (i, e) in enumerate(names)]
# Here, names is either an iterable of (name, value) or a mapping.
for item in names:
if isinstance(item, basestring):
member_name, member_value = item, names[item]
else:
member_name, member_value = item
classdict[member_name] = member_value
__order__.append(member_name)
# only set __order__ in classdict if name/value was not from a mapping
if not isinstance(item, basestring):
classdict['__order__'] = ' '.join(__order__)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
# TODO: replace the frame hack if a blessed way to know the calling
# module is ever developed
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(enum_class)
else:
enum_class.__module__ = module
return enum_class
@staticmethod
def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases or Enum is None:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum
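        # Editor's hedged illustration of the return value documented above:
        # for a mixed-in definition such as
        #   class MyIntEnum(int, Enum): ...
        # bases is (int, Enum) and this returns (int, Enum); for a plain
        #   class MyEnum(Enum): ...
        # it returns (object, Enum).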
if pyver < 3.0:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
if __new__:
return None, True, True # __new__, save_new, use_args
N__new__ = getattr(None, '__new__')
O__new__ = getattr(object, '__new__')
if Enum is None:
E__new__ = N__new__
else:
E__new__ = Enum.__dict__['__new__']
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
try:
target = possible.__dict__[method]
except (AttributeError, KeyError):
target = getattr(possible, method, None)
if target not in [
None,
N__new__,
O__new__,
E__new__,
]:
if method == '__member_new__':
classdict['__new__'] = target
return None, False, True
if isinstance(target, staticmethod):
target = target.__get__(member_type)
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, False, use_args
else:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
# should __new__ be saved as __member_new__ later?
save_new = __new__ is not None
if __new__ is None:
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
target = getattr(possible, method, None)
if target not in (
None,
None.__new__,
object.__new__,
Enum.__new__,
):
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.red)
value = value.value
#return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member.value == value:
return member
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
added_behavior = [m for m in self.__class__.__dict__ if m[0] != '_']
return (['__class__', '__doc__', '__module__', 'name', 'value'] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self.value
return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python versions before 2.6 use __cmp__
if pyver < 2.6:
def __cmp__(self, other):
if type(other) is self.__class__:
if self is other:
return 0
return -1
return NotImplemented
raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__cmp__'] = __cmp__
del __cmp__
else:
def __le__(self, other):
raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__le__'] = __le__
del __le__
def __lt__(self, other):
raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__lt__'] = __lt__
del __lt__
def __ge__(self, other):
raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__ge__'] = __ge__
del __ge__
def __gt__(self, other):
raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__gt__'] = __gt__
del __gt__
def __eq__(self, other):
if type(other) is self.__class__:
return self is other
return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
if type(other) is self.__class__:
return self is not other
return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`. This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
return self._value_
temp_enum_dict['value'] = value
del value
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
"""Enum where members are also (and must be) ints"""
def unique(enumeration):
"""Class decorator that ensures only unique members exist in an enumeration."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
duplicate_names = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates]
)
raise ValueError('duplicate names found in %r: %s' %
(enumeration, duplicate_names)
)
return enumeration
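# --- Editor's hedged usage sketch (not part of the original module) ---
# A minimal illustration of the class-based and functional APIs defined above;
# the names and values below are made up for demonstration.
if __name__ == '__main__':
    class Color(Enum):
        red = 1
        green = 2
        blue = 3
    assert Color(1) is Color.red           # by-value lookup via EnumMeta.__call__
    assert Color['blue'] is Color.blue     # by-name lookup via EnumMeta.__getitem__
    assert Color.red.name == 'red' and Color.red.value == 1
    Animal = Enum('Animal', 'ant bee cat')  # functional API; values auto-number from 1
    assert Animal.ant.value == 1
    unique(Color)                           # raises ValueError only if aliases exist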
|
furushchev/mongodb_store
|
refs/heads/hydro-devel
|
mongodb_log/scripts/mongodb_log.py
|
2
|
#!/usr/bin/python
###########################################################################
# mongodb_log.py - Python based ROS to MongoDB logger (multi-process)
#
# Created: Sun Dec 05 19:45:51 2010
# Copyright 2010-2012 Tim Niemueller [www.niemueller.de]
# 2010-2011 Carnegie Mellon University
# 2010 Intel Labs Pittsburgh
###########################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# Read the full text in the LICENSE.GPL file in the doc directory.
# make sure we aren't using floor division
from __future__ import division, with_statement
PACKAGE_NAME='mongodb_log'
NODE_NAME='mongodb_log'
NODE_NAME_TEMPLATE='%smongodb_log'
WORKER_NODE_NAME = "%smongodb_log_worker_%d_%s"
QUEUE_MAXSIZE = 100
# import roslib; roslib.load_manifest(PACKAGE_NAME)
import rospy
# for msg_to_document
import mongodb_store.util
import os
import re
import sys
import time
import pprint
import string
import signal
import subprocess
import socket  # needed for the socket.error handler in main()
from threading import Thread, Timer
from Queue import Empty
from optparse import OptionParser
from tempfile import mktemp
from datetime import datetime, timedelta
from time import sleep
from random import randint
from tf.msg import tfMessage
from sensor_msgs.msg import PointCloud, CompressedImage
from roslib.packages import find_node
#from rviz_intel.msg import TriangleMesh
use_setproctitle = True
try:
from setproctitle import setproctitle
except ImportError:
use_setproctitle = False
use_processes = False
# if use_processes:
from multiprocessing import Process, Lock, Condition, Queue, Value, current_process, Event
import multiprocessing as mp
# else:
# from threading import Lock, Condition, Event
# from Queue import Queue
# def Value(t, val, lock=None):
# return val
import genpy
import rosgraph.masterapi
import roslib.message
#from rospy import Time, Duration
import rostopic
from pymongo import SLOW_ONLY
from pymongo.errors import InvalidDocument, InvalidStringData
MongoClient = mongodb_store.util.import_MongoClient()
BACKLOG_WARN_LIMIT = 100
STATS_LOOPTIME = 10
STATS_GRAPHTIME = 60
class Counter(object):
def __init__(self, value = None, lock = True):
self.count = value or Value('i', 0, lock=lock)
self.mutex = Lock()
def increment(self, by = 1):
with self.mutex: self.count.value += by
def value(self):
with self.mutex: return self.count.value
class Barrier(object):
def __init__(self, num_threads):
self.num_threads = num_threads
self.threads_left = Value('i', num_threads, lock=True)
self.mutex = Lock()
self.waitcond = Condition(self.mutex)
def wait(self):
self.mutex.acquire()
self.threads_left.value -= 1
if self.threads_left.value == 0:
self.threads_left.value = self.num_threads
self.waitcond.notify_all()
self.mutex.release()
else:
self.waitcond.wait()
self.mutex.release()
class WorkerProcess(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = Value('i', 0)
# print "Creating process %s" % self.name
self.process = Process(name=self.name, target=self.run)
# self.process = Thread(name=self.name, target=self.run)
# print "created %s" % self.process
self.process.start()
# print "started %s" % self.process
def init(self):
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log %s" % self.topic)
self.mongoconn = MongoClient(self.mongodb_host, self.mongodb_port)
self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)  # profile slow operations only
self.collection = self.mongodb[self.collname]
self.collection.count()
self.queue.cancel_join_thread()
# clear signal handlers in this child process, rospy will handle signals for us
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
worker_node_name = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
# print "Calling init_node with %s from process %s" % (worker_node_name, mp.current_process())
rospy.init_node(worker_node_name, anonymous=False)
self.subscriber = None
while not self.subscriber and not self.is_quit():
try:
msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
except rostopic.ROSTopicIOException:
print("FAILED to subscribe, will keep trying %s" % self.name)
time.sleep(randint(1,10))
except rospy.ROSInitException:
print("FAILED to initialize, will keep trying %s" % self.name)
time.sleep(randint(1,10))
self.subscriber = None
def run(self):
self.init()
print("ACTIVE: %s" % self.name)
# run the thread
self.dequeue()
# free connection
# self.mongoconn.end_request()
def is_quit(self):
return self.quit.value == 1
def shutdown(self):
if not self.is_quit():
#print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
self.quit.value = 1
self.queue.put("shutdown")
while not self.queue.empty(): sleep(0.1)
#print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
self.process.join()
self.process.terminate()
def qsize(self):
return self.queue.qsize()
def enqueue(self, data, topic, current_time=None):
if not self.is_quit():
if self.queue.full():
try:
self.queue.get_nowait()
self.drop_counter.increment()
self.worker_drop_counter.increment()
except Empty:
pass
#self.queue.put((topic, data, current_time or datetime.now()))
self.queue.put((topic, data, rospy.get_time(), data._connection_header))
self.in_counter.increment()
self.worker_in_counter.increment()
def dequeue(self):
while not self.is_quit():
t = None
try:
t = self.queue.get(True)
except IOError:
# Anticipate Ctrl-C
#print("Quit W1: %s" % self.name)
self.quit.value = 1
break
if isinstance(t, tuple):
self.out_counter.increment()
self.worker_out_counter.increment()
topic = t[0]
msg = t[1]
ctime = t[2]
connection_header = t[3]
if isinstance(msg, rospy.Message):
try:
#print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
#pprint.pprint(doc)
meta = {}
# switched to use inserted_at to match message_store
# meta["recorded"] = ctime or datetime.now()
meta["topic"] = topic
if connection_header['latching'] == '1':
meta['latch'] = True
else:
meta['latch'] = False
if ctime is not None:
meta['inserted_at'] = datetime.utcfromtimestamp(ctime)
else:
meta['inserted_at'] = datetime.utcfromtimestamp(rospy.get_rostime().to_sec())
mongodb_store.util.store_message(self.collection, msg, meta)
except InvalidDocument, e:
print("InvalidDocument " + current_process().name + "@" + topic +": \n")
print e
except InvalidStringData, e:
print("InvalidStringData " + current_process().name + "@" + topic +": \n")
print e
else:
#print("Quit W2: %s" % self.name)
self.quit.value = 1
# we must make sure to clear the queue before exiting,
# or the parent thread might deadlock otherwise
#print("Quit W3: %s" % self.name)
self.subscriber.unregister()
self.subscriber = None
while not self.queue.empty():
t = self.queue.get_nowait()
print("STOPPED: %s" % self.name)
class SubprocessWorker(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix, cpp_logger):
self.name = "SubprocessWorker-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = False
        self._qsize = 0  # queue size reported by the C++ logger; returned by qsize()
self.thread = Thread(name=self.name, target=self.run)
mongodb_host_port = "%s:%d" % (mongodb_host, mongodb_port)
collection = "%s.%s" % (mongodb_name, collname)
nodename = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
self.process = subprocess.Popen([cpp_logger[0], "-t", topic, "-n", nodename,
"-m", mongodb_host_port, "-c", collection],
stdout=subprocess.PIPE)
self.thread.start()
    def qsize(self):
        return self._qsize
def run(self):
while not self.quit:
line = self.process.stdout.readline().rstrip()
if line == "": continue
arr = string.split(line, ":")
self.in_counter.increment(int(arr[0]))
self.out_counter.increment(int(arr[1]))
self.drop_counter.increment(int(arr[2]))
            self._qsize = int(arr[3])
self.worker_in_counter.increment(int(arr[0]))
self.worker_out_counter.increment(int(arr[1]))
self.worker_drop_counter.increment(int(arr[2]))
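            # Editor's hedged note: given the parsing above, each stdout line from
            # the C++ logger is expected to look like 'in:out:drop:qsize',
            # e.g. '12:12:0:3' (the concrete numbers are illustrative).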
def shutdown(self):
self.quit = True
self.process.kill()
self.process.wait()
class MongoWriter(object):
def __init__(self, topics = [],
all_topics = False, all_topics_interval = 5,
exclude_topics = [],
mongodb_host=None, mongodb_port=None, mongodb_name="roslog",
no_specific=False, nodename_prefix=""):
self.all_topics = all_topics
self.all_topics_interval = all_topics_interval
self.exclude_topics = exclude_topics
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.no_specific = no_specific
self.nodename_prefix = nodename_prefix
self.quit = False
self.topics = set()
#self.str_fn = roslib.message.strify_message
self.sep = "\n" #'\033[2J\033[;H'
self.in_counter = Counter()
self.out_counter = Counter()
self.drop_counter = Counter()
self.workers = {}
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log MAIN")
self.exclude_regex = []
for et in self.exclude_topics:
self.exclude_regex.append(re.compile(et))
self.exclude_already = []
self.subscribe_topics(set(topics))
if self.all_topics:
print("All topics")
self.ros_master = rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % self.nodename_prefix)
self.update_topics(restart=False)
self.start_all_topics_timer()
def subscribe_topics(self, topics):
# print "existing topics %s" % self.topics
# print "subscribing to topics %s" % topics
for topic in topics:
if topic and topic[-1] == '/':
topic = topic[:-1]
if topic in self.topics: continue
if topic in self.exclude_already: continue
do_continue = False
for tre in self.exclude_regex:
if tre.match(topic):
print("*** IGNORING topic %s due to exclusion rule" % topic)
do_continue = True
self.exclude_already.append(topic)
break
if do_continue: continue
            # although the collection-name conversion is not strictly necessary, since MongoDB
            # could handle raw topic names as collection names and we could then use
            # mongodb[topic], we want names that work better with the query tools, even
            # though there is the theoretical possibility of name clashes (hence the check)
collname = mongodb_store.util.topic_name_to_collection_name(topic)
if collname in self.workers.keys():
print("Two converted topic names clash: %s, ignoring topic %s"
% (collname, topic))
else:
try:
print("Adding topic %s" % topic)
self.workers[collname] = self.create_worker(len(self.workers), topic, collname)
self.topics |= set([topic])
except Exception, e:
                    print('Failed to subscribe to %s due to %s' % (topic, e))
def create_worker(self, idnum, topic, collname):
msg_class, real_topic, msg_eval = rostopic.get_topic_class(topic, blocking=True)
w = None
node_path = None
if not self.no_specific and msg_class == tfMessage:
print("DETECTED transform topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_tf")
if not node_path:
print("FAILED to detect mongodb_log_tf, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == PointCloud:
print("DETECTED point cloud topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_pcl")
if not node_path:
print("FAILED to detect mongodb_log_pcl, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == CompressedImage:
print("DETECTED compressed image topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_cimg")
if not node_path:
print("FAILED to detect mongodb_log_cimg, falling back to generic logger (did not build package?)")
"""
elif msg_class == TriangleMesh:
print("DETECTED triangle mesh topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_trimesh")
if not node_path:
print("FAILED to detect mongodb_log_trimesh, falling back to generic logger (did not build package?)")
"""
if node_path:
w = SubprocessWorker(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix, node_path)
if not w:
print("GENERIC Python logger used for topic %s" % topic)
w = WorkerProcess(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix)
return w
def run(self):
looping_threshold = timedelta(0, STATS_LOOPTIME, 0)
while not self.quit:
started = datetime.now()
# the following code makes sure we run once per STATS_LOOPTIME, taking
# varying run-times and interrupted sleeps into account
td = datetime.now() - started
while not self.quit and td < looping_threshold:
sleeptime = STATS_LOOPTIME - (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
if sleeptime > 0: sleep(sleeptime)
td = datetime.now() - started
def shutdown(self):
self.quit = True
if hasattr(self, "all_topics_timer"): self.all_topics_timer.cancel()
for name, w in self.workers.items():
#print("Shutdown %s" % name)
w.shutdown()
def start_all_topics_timer(self):
if not self.all_topics or self.quit: return
self.all_topics_timer = Timer(self.all_topics_interval, self.update_topics)
self.all_topics_timer.start()
def update_topics(self, restart=True):
"""
Called at a fixed interval (see start_all_topics_timer) to update the list of topics if we are logging all topics (e.g. --all-topics flag is given).
"""
if not self.all_topics or self.quit: return
ts = rospy.get_published_topics()
topics = set([t for t, t_type in ts if t != "/rosout" and t != "/rosout_agg"])
new_topics = topics - self.topics
self.subscribe_topics(new_topics)
if restart: self.start_all_topics_timer()
def get_memory_usage_for_pid(self, pid):
scale = {'kB': 1024, 'mB': 1024 * 1024,
'KB': 1024, 'MB': 1024 * 1024}
try:
f = open("/proc/%d/status" % pid)
t = f.read()
f.close()
except:
return (0, 0, 0)
if t == "": return (0, 0, 0)
try:
tmp = t[t.index("VmSize:"):].split(None, 3)
size = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmRSS:"):].split(None, 3)
rss = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmStk:"):].split(None, 3)
stack = int(tmp[1]) * scale[tmp[2]]
return (size, rss, stack)
except ValueError:
return (0, 0, 0)
def get_memory_usage(self):
size, rss, stack = 0, 0, 0
for _, w in self.workers.items():
pmem = self.get_memory_usage_for_pid(w.process.pid)
size += pmem[0]
rss += pmem[1]
stack += pmem[2]
#print("Size: %d RSS: %s Stack: %s" % (size, rss, stack))
return (size, rss, stack)
def main(argv):
parser = OptionParser()
parser.usage += " [TOPICs...]"
parser.add_option("--nodename-prefix", dest="nodename_prefix",
help="Prefix for worker node names", metavar="ROS_NODE_NAME",
default="")
parser.add_option("--mongodb-host", dest="mongodb_host",
help="Hostname of MongoDB", metavar="HOST",
default=rospy.get_param("mongodb_host", "localhost"))
parser.add_option("--mongodb-port", dest="mongodb_port",
help="Hostname of MongoDB", type="int",
metavar="PORT", default=rospy.get_param("mongodb_port", 27017))
parser.add_option("--mongodb-name", dest="mongodb_name",
help="Name of DB in which to store values",
metavar="NAME", default="roslog")
parser.add_option("-a", "--all-topics", dest="all_topics", default=False,
action="store_true",
help="Log all existing topics (still excludes /rosout, /rosout_agg)")
parser.add_option("--all-topics-interval", dest="all_topics_interval", default=5,
help="Time in seconds between checks for new topics", type="int")
parser.add_option("-x", "--exclude", dest="exclude",
help="Exclude topics matching REGEX, may be given multiple times",
action="append", type="string", metavar="REGEX", default=[])
parser.add_option("--no-specific", dest="no_specific", default=False,
action="store_true", help="Disable specific loggers")
(options, args) = parser.parse_args()
if not options.all_topics and len(args) == 0:
parser.print_help()
return
try:
rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % options.nodename_prefix).getPid()
except socket.error:
print("Failed to communicate with master")
mongowriter = MongoWriter(topics=args,
all_topics=options.all_topics,
all_topics_interval = options.all_topics_interval,
exclude_topics = options.exclude,
mongodb_host=options.mongodb_host,
mongodb_port=options.mongodb_port,
mongodb_name=options.mongodb_name,
no_specific=options.no_specific,
nodename_prefix=options.nodename_prefix)
def signal_handler(signal, frame):
mongowriter.shutdown()
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
mongowriter.run()
if __name__ == "__main__":
main(sys.argv)
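# Editor's hedged note on typical invocations (host, port and topic names are
# illustrative, not prescribed by this module):
#   rosrun mongodb_log mongodb_log.py --mongodb-host localhost --mongodb-port 27017 /some_topic
#   rosrun mongodb_log mongodb_log.py -a --all-topics-interval 10 -x '/camera/.*'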
|
jm-begon/scikit-learn
|
refs/heads/master
|
examples/datasets/plot_digits_last_image.py
|
386
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Digit Dataset
=========================================================
This dataset is made up of 1797 8x8 images. Each image,
like the one shown below, is of a hand-written digit.
In order to utilize an 8x8 figure like this, we'd have to
first transform it into a feature vector with length 64.
See `here
<http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits>`_
for more information about this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
from sklearn import datasets
import matplotlib.pyplot as plt
#Load the digits dataset
digits = datasets.load_digits()
# Display the last digit in the dataset
plt.figure(1, figsize=(3, 3))
plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
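# Editor's hedged addition: the docstring above mentions turning each 8x8 image
# into a length-64 feature vector; a minimal sketch of that reshaping step
# (the shape values assume the standard 1797-image digits set):
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
print(data.shape)  # expected: (1797, 64)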
|
alon/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_callback_interface.py
|
142
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
callback interface TestCallbackInterface {
attribute boolean bool;
};
""")
results = parser.finish()
iface = results[0]
harness.ok(iface.isCallback(), "Interface should be a callback")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface : TestInterface {
attribute boolean bool;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should not allow non-callback parent of callback interface")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface : TestCallbackInterface {
};
callback interface TestCallbackInterface {
attribute boolean bool;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should not allow callback parent of non-callback interface")
parser = parser.reset()
parser.parse("""
callback interface TestCallbackInterface1 {
void foo();
};
callback interface TestCallbackInterface2 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
};
callback interface TestCallbackInterface3 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
static void bar();
};
callback interface TestCallbackInterface4 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
static void bar();
const long baz = 5;
};
callback interface TestCallbackInterface5 {
static attribute boolean bool;
void foo();
};
callback interface TestCallbackInterface6 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
void bar();
};
callback interface TestCallbackInterface7 {
static attribute boolean bool;
};
callback interface TestCallbackInterface8 {
attribute boolean bool;
};
callback interface TestCallbackInterface9 : TestCallbackInterface1 {
void foo();
};
callback interface TestCallbackInterface10 : TestCallbackInterface1 {
void bar();
};
""")
results = parser.finish()
for (i, iface) in enumerate(results):
harness.check(iface.isSingleOperationInterface(), i < 4,
"Interface %s should be a single operation interface" %
iface.identifier.name)
|
gymnasium/edx-platform
|
refs/heads/open-release/hawthorn.master
|
cms/djangoapps/contentstore/debug_file_uploader.py
|
25
|
import time
from django.core.files.uploadhandler import FileUploadHandler
class DebugFileUploader(FileUploadHandler):
def __init__(self, request=None):
super(DebugFileUploader, self).__init__(request)
self.count = 0
def receive_data_chunk(self, raw_data, start):
time.sleep(1)
self.count = self.count + len(raw_data)
fail_at = None
if 'fail_at' in self.request.GET:
fail_at = int(self.request.GET.get('fail_at'))
if fail_at and self.count > fail_at:
raise Exception('Triggered fail')
return raw_data
def file_complete(self, file_size):
return None
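    # Editor's hedged usage note: Django enables custom handlers through the
    # FILE_UPLOAD_HANDLERS setting, so this class would typically be activated with
    # something like (the dotted path is assumed from this file's location):
    #   FILE_UPLOAD_HANDLERS = ['contentstore.debug_file_uploader.DebugFileUploader']
    # A request carrying '?fail_at=1000' then makes receive_data_chunk raise once
    # more than 1000 bytes have arrived, simulating a failed upload.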
|
davidfraser/genshi
|
refs/heads/master
|
genshi/tests/__init__.py
|
23
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import unittest
def suite():
import genshi
from genshi.tests import builder, core, input, output, path, util
from genshi.filters import tests as filters
from genshi.template import tests as template
suite = unittest.TestSuite()
suite.addTest(builder.suite())
suite.addTest(core.suite())
suite.addTest(filters.suite())
suite.addTest(input.suite())
suite.addTest(output.suite())
suite.addTest(path.suite())
suite.addTest(template.suite())
suite.addTest(util.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
cstavr/synnefo
|
refs/heads/develop
|
snf-astakos-app/astakos/im/tests/management/user_activation_send.py
|
10
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import mail
from astakos.im.user_logic import verify
from .common import SynnefoManagementTestCase, call_synnefo_command
def snf_manage(user, **kwargs):
"""An easy to use wrapper that simulates snf-manage."""
id = str(user.pk)
return call_synnefo_command("user-activation-send", *(id,), **kwargs)
class TestSendUserActivation(SynnefoManagementTestCase):
"""Class to unit test the "user-activation-send" management command."""
def test_send_activation(self):
"""Test if verification mail is send appropriately."""
# Sending a verification mail to an unverified user should work.
out, err = snf_manage(self.user1)
self.reload_user()
self.assertInLog("Activation sent to '%s'" % self.user1.email, err)
# Check if email is actually sent.
self.assertEqual(len(mail.outbox), 1)
body = mail.outbox[0].body
self.assertIn(self.user1.realname, body)
self.assertIn(self.user1.verification_code, body)
# Verify the user.
self.assertEqual(len(mail.outbox), 1)
res = verify(self.user1, self.user1.verification_code)
self.assertFalse(res.is_error())
# Sending a verification mail to a verified user should fail.
out, err = snf_manage(self.user1)
self.assertInLog("User email already verified '%s'" % self.user1.email,
err)
|
RamonGuiuGou/l10n-spain
|
refs/heads/9.0
|
l10n_es_partner/wizard/l10n_es_partner_wizard.py
|
4
|
# -*- coding: utf-8 -*-
# © 2013-2016 Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl-3).
from openerp import models, fields, api, _
from openerp import tools
from ..gen_src.gen_data_banks import gen_bank_data_xml
import tempfile
import os
class L10nEsPartnerImportWizard(models.TransientModel):
_name = 'l10n.es.partner.import.wizard'
_inherit = 'res.config.installer'
import_fail = fields.Boolean(default=False)
@api.multi
def import_local(self):
res = super(L10nEsPartnerImportWizard, self).execute()
path = os.path.join('l10n_es_partner', 'wizard', 'data_banks.xml')
with tools.file_open(path) as fp:
tools.convert_xml_import(
self._cr, 'l10n_es_partner', fp, {}, 'init', noupdate=True)
return res
@api.multi
def execute(self):
import requests
src_file = tempfile.NamedTemporaryFile(delete=False)
dest_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
response = requests.get(
'http://www.bde.es/f/webbde/IFI/servicio/regis/ficheros/es/'
'REGBANESP_CONESTAB_A.XLS')
if not response.ok:
raise Exception()
src_file.write(response.content)
src_file.close()
# Generate XML and reopen it
gen_bank_data_xml(src_file.name, dest_file.name)
tools.convert_xml_import(
self._cr, 'l10n_es_partner', dest_file.name, {}, 'init',
noupdate=True)
except:
self.import_fail = True
return {
'name': _('Import spanish bank data'),
'type': 'ir.actions.act_window',
'res_model': 'l10n.es.partner.import.wizard',
'view_id': self.env.ref("l10n_es_partner."
"l10n_es_partner_import_wizard").id,
'view_type': 'form',
'view_mode': 'form',
'res_id': self.id,
'target': 'new',
}
finally:
os.remove(src_file.name)
os.remove(dest_file.name)
|
astrofrog/glue-3d-viewer
|
refs/heads/master
|
glue_vispy_viewers/extern/vispy/visuals/collections/raw_polygon_collection.py
|
7
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from ... import glsl
from . collection import Collection
from ..transforms import NullTransform
from ...geometry import triangulate
class RawPolygonCollection(Collection):
def __init__(self, user_dtype=None, transform=None,
vertex=None, fragment=None, **kwargs):
base_dtype = [('position', (np.float32, 3), '!local', (0, 0, 0)),
('color', (np.float32, 4), 'local', (0, 0, 0, 1))]
dtype = base_dtype
if user_dtype:
dtype.extend(user_dtype)
if vertex is None:
vertex = glsl.get('collections/raw-triangle.vert')
if transform is None:
transform = NullTransform()
self.transform = transform
if fragment is None:
fragment = glsl.get('collections/raw-triangle.frag')
Collection.__init__(self, dtype=dtype, itype=np.uint32, # 16 for WebGL
mode="triangles",
vertex=vertex, fragment=fragment, **kwargs)
# Set hooks if necessary
program = self._programs[0]
program.vert['transform'] = self.transform
def append(self, points, **kwargs):
"""
Append a new set of vertices to the collection.
        For the kwargs argument, n is the number of vertices (local) or the
        number of items (shared).
Parameters
----------
points : np.array
Vertices composing the triangles
color : list, array or 4-tuple
Path color
"""
vertices, indices = triangulate(points)
itemsize = len(vertices)
itemcount = 1
V = np.empty(itemcount * itemsize, dtype=self.vtype)
for name in self.vtype.names:
if name not in ['collection_index', 'position']:
V[name] = kwargs.get(name, self._defaults[name])
V["position"] = vertices
# Uniforms
if self.utype:
U = np.zeros(itemcount, dtype=self.utype)
for name in self.utype.names:
if name not in ["__unused__"]:
U[name] = kwargs.get(name, self._defaults[name])
else:
U = None
I = np.array(indices).ravel()
Collection.append(self, vertices=V, uniforms=U, indices=I,
itemsize=itemsize)
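# A minimal usage sketch (not part of the original module; the square below is
# hypothetical data). It relies only on the constructor defaults and append()
# defined above, and assumes a working vispy/OpenGL context:
#
#   collection = RawPolygonCollection()
#   square = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
#                     dtype=np.float32)
#   collection.append(square, color=(1, 0, 0, 1))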
|
userzimmermann/robotframework-python3
|
refs/heads/master
|
src/robot/testdoc.py
|
1
|
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
from six import string_types
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates a high level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly the same semantics as the same options
have when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
## import pythonpathsetter
#HACK: Prevent 2to3 from converting to relative import
pythonpathsetter = __import__('pythonpathsetter')
from robot import utils
from robot.conf import RobotSettings
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robot.parsing import disable_curdir_processing
from robot.running import TestSuiteBuilder
class TestDoc(utils.Application):
def __init__(self):
utils.Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = utils.abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with open(outfile, 'w') as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if isinstance(datasources, string_types):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': utils.format_time(generated_time, gmtsep=' '),
'generatedMillis': int(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
            'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return utils.get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return utils.html_escape(item)
def _html(self, item):
return utils.html_format(utils.unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == 'setup':
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == 'teardown':
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.is_for_loop():
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' IN RANGE ' if kw.range else ' IN '
return ', '.join(kw.vars) + joiner + utils.seq2str2(kw.items)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = utils.secs_to_timestr(utils.timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
    Arguments and options have the same semantics, and options have the same
    names, as arguments and options to Testdoc.
Example::
from robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
|
sbuss/voteswap
|
refs/heads/master
|
lib/django/contrib/gis/db/backends/oracle/models.py
|
475
|
"""
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class OracleGeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
app_label = 'gis'
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'column_name'
def __str__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class OracleSpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
app_label = 'gis'
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
|
sean-/ansible
|
refs/heads/devel
|
v1/ansible/utils/string_functions.py
|
150
|
def isprintable(instring):
if isinstance(instring, str):
#http://stackoverflow.com/a/3637294
import string
printset = set(string.printable)
isprintable = set(instring).issubset(printset)
return isprintable
else:
return True
def count_newlines_from_end(str):
i = len(str)
while i > 0:
if str[i-1] != '\n':
break
i -= 1
return len(str) - i
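# Worked examples (a sketch, not part of the original module):
#
#   isprintable("hello world")          -> True
#   isprintable(u"hello")               -> True (non-str input is treated as printable)
#   count_newlines_from_end("abc\n\n")  -> 2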
|
smartdata-x/robots
|
refs/heads/master
|
pylib/Twisted/twisted/test/testutils.py
|
56
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I{Private} test utilities for use throughout Twisted's test suite. Unlike
C{proto_helpers}, this is no exception to the
don't-use-it-outside-Twisted-we-won't-maintain-compatibility rule!
@note: Maintainers be aware: things in this module should be gradually promoted
to more full-featured test helpers and exposed as public API as your
maintenance time permits. In order to be public API though, they need
their own test cases.
"""
from io import BytesIO
from xml.dom import minidom as dom
from twisted.internet.protocol import FileWrapper
class IOPump:
"""Utility to pump data between clients and servers for protocol testing.
Perhaps this is a utility worthy of being in protocol.py?
"""
def __init__(self, client, server, clientIO, serverIO):
self.client = client
self.server = server
self.clientIO = clientIO
self.serverIO = serverIO
def flush(self):
"Pump until there is no more input or output."
while self.pump():
pass
def pump(self):
"""Move data back and forth.
Returns whether any data was moved.
"""
self.clientIO.seek(0)
self.serverIO.seek(0)
cData = self.clientIO.read()
sData = self.serverIO.read()
self.clientIO.seek(0)
self.serverIO.seek(0)
self.clientIO.truncate()
self.serverIO.truncate()
for byte in cData:
self.server.dataReceived(byte)
for byte in sData:
self.client.dataReceived(byte)
if cData or sData:
return 1
else:
return 0
def returnConnected(server, client):
"""Take two Protocol instances and connect them.
"""
cio = BytesIO()
sio = BytesIO()
client.makeConnection(FileWrapper(cio))
server.makeConnection(FileWrapper(sio))
pump = IOPump(client, server, cio, sio)
# Challenge-response authentication:
pump.flush()
# Uh...
pump.flush()
return pump
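# A minimal usage sketch (the protocol classes are hypothetical): wire two
# protocol instances together over in-memory transports and move bytes between
# them without a real reactor:
#
#   client = SomeClientProtocol()
#   server = SomeServerProtocol()
#   pump = returnConnected(server, client)
#   client.transport.write(b"hello")   # buffered in the client-side BytesIO
#   pump.flush()                       # delivered to server.dataReceived()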
class XMLAssertionMixin(object):
"""
Test mixin defining a method for comparing serialized XML documents.
Must be mixed in to a L{test case<unittest.TestCase>}.
"""
def assertXMLEqual(self, first, second):
"""
Verify that two strings represent the same XML document.
@param first: An XML string.
@type first: L{bytes}
@param second: An XML string that should match C{first}.
@type second: L{bytes}
"""
self.assertEqual(
dom.parseString(first).toxml(),
dom.parseString(second).toxml())
class _Equal(object):
"""
A class the instances of which are equal to anything and everything.
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
class _NotEqual(object):
"""
A class the instances of which are equal to nothing.
"""
def __eq__(self, other):
return False
def __ne__(self, other):
return True
class ComparisonTestsMixin(object):
"""
A mixin which defines a method for making assertions about the correctness
of an implementation of C{==} and C{!=}.
Use this to unit test objects which follow the common convention for C{==}
and C{!=}:
- The object compares equal to itself
- The object cooperates with unrecognized types to allow them to
implement the comparison
- The object implements not-equal as the opposite of equal
"""
def assertNormalEqualityImplementation(self, firstValueOne, secondValueOne,
valueTwo):
"""
Assert that C{firstValueOne} is equal to C{secondValueOne} but not
        equal to C{valueTwo} and that it defines equality cooperatively with
other types it doesn't know about.
@param firstValueOne: An object which is expected to compare as equal to
C{secondValueOne} and not equal to C{valueTwo}.
@param secondValueOne: A different object than C{firstValueOne} but
which is expected to compare equal to that object.
@param valueTwo: An object which is expected to compare as not equal to
C{firstValueOne}.
"""
# This doesn't use assertEqual and assertNotEqual because the exact
# operator those functions use is not very well defined. The point
# of these assertions is to check the results of the use of specific
# operators (precisely to ensure that using different permutations
# (eg "x == y" or "not (x != y)") which should yield the same results
# actually does yield the same result). -exarkun
self.assertTrue(firstValueOne == firstValueOne)
self.assertTrue(firstValueOne == secondValueOne)
self.assertFalse(firstValueOne == valueTwo)
self.assertFalse(firstValueOne != firstValueOne)
self.assertFalse(firstValueOne != secondValueOne)
self.assertTrue(firstValueOne != valueTwo)
self.assertTrue(firstValueOne == _Equal())
self.assertFalse(firstValueOne != _Equal())
self.assertFalse(firstValueOne == _NotEqual())
self.assertTrue(firstValueOne != _NotEqual())
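# A minimal usage sketch (Record is a hypothetical value class): a test case
# mixes this in and exercises __eq__/__ne__ with a single assertion:
#
#   class RecordComparisonTests(unittest.TestCase, ComparisonTestsMixin):
#       def test_equality(self):
#           self.assertNormalEqualityImplementation(
#               Record(1), Record(1), Record(2))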
|
russcollier/SamplesAndNuggets
|
refs/heads/master
|
python/threadpool_example.py
|
1
|
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool
class EntryPoint:
Log = logging.getLogger(__name__)
def __init__(self):
self.__total_size = Value('i', 0)
self.__sizes_by_file = Manager().dict()
def main(self):
urls = ['https://code.jquery.com/jquery-git.js',
'https://code.jquery.com/jquery-3.1.0.js',
'https://code.jquery.com/jquery-3.0.0.js',
'https://code.jquery.com/jquery-2.2.0.js',
'https://code.jquery.com/jquery-2.1.0.js',
'https://code.jquery.com/jquery-2.0.0.js',
'https://code.jquery.com/jquery-1.12.0.js',
'https://code.jquery.com/jquery-1.11.0.js',
'https://code.jquery.com/jquery-1.10.0.js',
'https://code.jquery.com/jquery-1.9.0.js',
'https://code.jquery.com/jquery-1.7.0.js',
'https://code.jquery.com/jquery-1.6.js',
'https://code.jquery.com/jquery-1.5.js',
'https://code.jquery.com/jquery-1.4.js',
'https://code.jquery.com/jquery-1.3.js',
'https://code.jquery.com/jquery-1.2.js',
'https://code.jquery.com/jquery-1.1.js',
'https://code.jquery.com/jquery-1.0.js']
self.__compute_serially(urls)
self.__compute_with_threadpool(urls)
def __compute_serially(self, urls):
start_time = datetime.utcnow()
sizes_by_file = dict()
for url in urls:
sizes_by_file[url] = self.__get_size_of_file(url)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Serial version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __compute_with_threadpool(self, urls):
start_time = datetime.utcnow()
pool = ThreadPool(processes=8)
pool.map(self.__get_size_of_file_in_parallel, urls)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Threadpool version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __get_size_of_file_in_parallel(self, url):
self.__sizes_by_file[url] = self.__get_size_of_file(url)
# with self.__total_size.get_lock():
# self.__total_size.value += self.__get_size_of_file(url)
@staticmethod
def __get_size_of_file(url):
with urllib.request.urlopen(url) as f:
contents = f.read()
return len(contents)
@staticmethod
def get_timespan(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def setup_logging():
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
logger = logging.StreamHandler()
logger.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - [%(thread)d] %(name)s - %(message)s'))
root_logger.addHandler(logger)
def main():
setup_logging()
log = logging.getLogger()
try:
EntryPoint().main()
except Exception as e:
log.exception(e)
if __name__ == '__main__':
main()
|
ARLahan/authomatic
|
refs/heads/master
|
examples/django/example/simple/models.py
|
10644
|
from django.db import models
# Create your models here.
|
donghaoren/iVisDesigner
|
refs/heads/master
|
server/proxy/models.py
|
10644
|
from django.db import models
# Create your models here.
|
gboone/wedding.harmsboone.org
|
refs/heads/master
|
rsvp/migrations/0010_auto__chg_field_room_room_type.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Room.room_type'
db.alter_column(u'rsvp_room', 'room_type', self.gf('django.db.models.fields.CharField')(default=u'One bedroom apartment', max_length=255))
def backwards(self, orm):
# Changing field 'Room.room_type'
db.alter_column(u'rsvp_room', 'room_type', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
u'rsvp.event': {
'Meta': {'object_name': 'Event'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.guest': {
'Meta': {'ordering': "['-last_name', '-first_name']", 'object_name': 'Guest'},
'attending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_as': ('django.db.models.fields.CharField', [], {'max_length': '91', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'max_guests': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primary_email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street_addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zip_code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
u'rsvp.hotel': {
'Meta': {'object_name': 'Hotel'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'total_guest_count': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
u'rsvp.location': {
'Meta': {'object_name': 'Location'},
'distance': ('django.db.models.fields.DecimalField', [], {'max_digits': '3', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.room': {
'Meta': {'object_name': 'Room'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
'hotel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsvp.Hotel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_occupancy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'room_type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'rsvp.table': {
'Meta': {'object_name': 'Table'},
'guests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['rsvp.Guest']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rsvp']
|
dsweet04/rekall
|
refs/heads/master
|
tools/layout_expert/layout_expert/c_ast/pre_ast.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Google Inc. All Rights Reserved.
#
# Authors:
# Arkadiusz Socała <as277575@mimuw.edu.pl>
# Michael Cohen <scudette@google.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Classes representing nodes of AST before preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from layout_expert.c_ast import visitor_mixin
from layout_expert.common import data_container
from layout_expert.serialization import json_serialization
class _PreASTNode(data_container.DataContainer, visitor_mixin.VisitorMixin):
"""A base clas for pre-AST nodes."""
class File(_PreASTNode):
def __init__(self, content):
super(File, self).__init__()
self.content = content
class Include(_PreASTNode):
def __init__(self, path, quotes_type, absolute_path=None, content=None):
super(Include, self).__init__()
self.path = path
self.quotes_type = quotes_type
self.absolute_path = absolute_path
self.content = content
class Pragma(_PreASTNode):
def __init__(self, argument_string):
super(Pragma, self).__init__()
self.argument_string = argument_string
class Error(_PreASTNode):
def __init__(self, message):
super(Error, self).__init__()
self.message = message
class DefineObjectLike(_PreASTNode):
"""A class that represents an object-like definition.
For example:
#define foo
"""
def __init__(self, name, replacement):
super(DefineObjectLike, self).__init__()
self.name = name
self.replacement = replacement
class DefineFunctionLike(_PreASTNode):
"""A class that represents a function-like definition.
    For example:
#define foo()
"""
def __init__(self, name, arguments, replacement):
super(DefineFunctionLike, self).__init__()
self.name = name
self.arguments = arguments
self.replacement = replacement
class MacroExpression(_PreASTNode):
"""Represent an expression to be expanded by the preprocessor.
Actually evaluating the expression happens upon macro substitution. We just
copy the expression verbatim here.
"""
def __init__(self, expression_string):
super(MacroExpression, self).__init__()
self.expression_string = expression_string
class Undef(_PreASTNode):
def __init__(self, name):
super(Undef, self).__init__()
self.name = name
class If(_PreASTNode):
"""A class to represent a conditional (e.g. ifdef) block."""
def __init__(self, conditional_blocks):
"""Initializes an If object.
Args:
conditional_blocks: A list of ConditionalBlock objects.
        Note that the child nodes are of types ConditionalBlock and
        CompositeBlock (the latter in the case of an else clause).
"""
super(If, self).__init__()
self.conditional_blocks = conditional_blocks
class ConditionalBlock(_PreASTNode):
"""A class representing a pair of conditional expression and content.
This is an internal node to represent the condition inside of an If block.
"""
def __init__(self, conditional_expression, content):
"""Initiates a ConditionalBlock object.
Args:
conditional_expression: an expression representing a logic
condition
content: a content corresponding to this condition.
"""
super(ConditionalBlock, self).__init__()
self.conditional_expression = conditional_expression
self.content = content
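# A sketch of how a preprocessor conditional could map onto the nodes above
# (the constructor arguments are illustrative, not output of an actual parser):
#
#   #if FOO              If(conditional_blocks=[
#   <then-content>   ->      ConditionalBlock(MacroExpression('FOO'), <then>),
#   #else                    CompositeBlock([<else-content>]),
#   #endif               ])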
class CompositeBlock(_PreASTNode):
def __init__(self, content=None):
super(CompositeBlock, self).__init__()
self.content = content or []
def __str__(self):
return ' '.join(map(str, self.content))
class TextBlock(_PreASTNode):
def __init__(self, content):
super(TextBlock, self).__init__()
self.content = content
json_serialization.DataContainerObjectRenderer.set_safe_constructors(
File,
Include,
Pragma,
Error,
DefineObjectLike,
DefineFunctionLike,
Undef,
If,
ConditionalBlock,
CompositeBlock,
TextBlock,
MacroExpression,
)
|
dmitry-sobolev/ansible
|
refs/heads/devel
|
contrib/inventory/apstra_aos.py
|
25
|
#!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
 - Copy both files (.py and .ini) to your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
2 modes are currently supported: **device based** or **blueprint based**:
 - For **Device based**, the list of devices is taken from the global device list
the serial ID will be used as the inventory_hostname
 - For **Blueprint based**, the list of devices is taken from the given blueprint
the Node name will be used as the inventory_hostname
Input parameters can be provided either with the ini file or by using Environment Variables:
The following Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the Environment Variables
Tested with Apstra AOS 1.1
This script was inspired by the cobbler.py inventory script. Thanks!
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import os
import sys
import argparse
import re
try:
import json
except ImportError:
import simplejson as json
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session( server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos' )
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server )
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password )
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username )
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
        # - For device based, the list of devices is taken from the global device list
# the serial ID will be used as the inventory_hostname
        # - For Blueprint based, the list of devices is taken from the given blueprint
# the Node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find( uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'] )
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attribute to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
                        # each link has 2 sides [0, 1], and it is unknown which one matches this device;
                        # at first we assume the first side matches (0) and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
                                # if we haven't matched the first time, prepare the peer_id
# for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
                # If not reachable, create by key and
                # if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if device is associated with a blueprint
                # if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
# Try to reach all parameters from File, if not available try from ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
pass
try:
self.aos_server_port = config.get('aos', 'port')
except:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
pass
try:
self.aos_username = config.get('aos', 'username')
except:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
pass
try:
self.aos_password = config.get('aos', 'password')
except:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
pass
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
pass
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except:
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
        # Check if the group exists; if not, initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
        # Check if the host exists; if not, initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
        # Check if the group exists; if not, initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name,'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
        Clean up a group name by:
        - Replacing all non-alphanumeric characters with underscores
- Converting to lowercase
"""
        rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
                self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
|
moria/zulip
|
refs/heads/master
|
manage.py
|
109
|
#!/usr/bin/env python
import os
import sys
import logging
import subprocess
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
logger = logging.getLogger("zulip.management")
subprocess.check_call([os.path.join(os.path.dirname(__file__), "bin", "log-management-command"),
" ".join(sys.argv)])
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Chaffelson/whoville
|
refs/heads/master
|
whoville/cloudbreak/models/flex_subscription_response.py
|
1
|
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlexSubscriptionResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'subscription_id': 'str',
'smart_sense_subscription_id': 'int',
'used_as_default': 'bool',
'used_for_controller': 'bool',
'id': 'int',
'owner': 'str',
'account': 'str',
'public_in_account': 'bool',
'smart_sense_subscription': 'SmartSenseSubscriptionJson'
}
attribute_map = {
'name': 'name',
'subscription_id': 'subscriptionId',
'smart_sense_subscription_id': 'smartSenseSubscriptionId',
'used_as_default': 'usedAsDefault',
'used_for_controller': 'usedForController',
'id': 'id',
'owner': 'owner',
'account': 'account',
'public_in_account': 'publicInAccount',
'smart_sense_subscription': 'smartSenseSubscription'
}
def __init__(self, name=None, subscription_id=None, smart_sense_subscription_id=None, used_as_default=False, used_for_controller=False, id=None, owner=None, account=None, public_in_account=False, smart_sense_subscription=None):
"""
FlexSubscriptionResponse - a model defined in Swagger
"""
self._name = None
self._subscription_id = None
self._smart_sense_subscription_id = None
self._used_as_default = None
self._used_for_controller = None
self._id = None
self._owner = None
self._account = None
self._public_in_account = None
self._smart_sense_subscription = None
self.name = name
if subscription_id is not None:
self.subscription_id = subscription_id
if smart_sense_subscription_id is not None:
self.smart_sense_subscription_id = smart_sense_subscription_id
if used_as_default is not None:
self.used_as_default = used_as_default
if used_for_controller is not None:
self.used_for_controller = used_for_controller
if id is not None:
self.id = id
if owner is not None:
self.owner = owner
if account is not None:
self.account = account
if public_in_account is not None:
self.public_in_account = public_in_account
if smart_sense_subscription is not None:
self.smart_sense_subscription = smart_sense_subscription
@property
def name(self):
"""
Gets the name of this FlexSubscriptionResponse.
name of the resource
:return: The name of this FlexSubscriptionResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this FlexSubscriptionResponse.
name of the resource
:param name: The name of this FlexSubscriptionResponse.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def subscription_id(self):
"""
Gets the subscription_id of this FlexSubscriptionResponse.
Identifier of Flex subscription.
:return: The subscription_id of this FlexSubscriptionResponse.
:rtype: str
"""
return self._subscription_id
@subscription_id.setter
def subscription_id(self, subscription_id):
"""
Sets the subscription_id of this FlexSubscriptionResponse.
Identifier of Flex subscription.
:param subscription_id: The subscription_id of this FlexSubscriptionResponse.
:type: str
"""
if subscription_id is not None and not re.search('^(FLEX-[0-9]{10}$)', subscription_id):
raise ValueError("Invalid value for `subscription_id`, must be a follow pattern or equal to `/^(FLEX-[0-9]{10}$)/`")
self._subscription_id = subscription_id
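    # Example of a value the setter above accepts (a sketch):
    #   response.subscription_id = 'FLEX-0123456789'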
@property
def smart_sense_subscription_id(self):
"""
Gets the smart_sense_subscription_id of this FlexSubscriptionResponse.
Identifier of SmartSense subscription Cloudbreak domain object json representation.
:return: The smart_sense_subscription_id of this FlexSubscriptionResponse.
:rtype: int
"""
return self._smart_sense_subscription_id
@smart_sense_subscription_id.setter
def smart_sense_subscription_id(self, smart_sense_subscription_id):
"""
Sets the smart_sense_subscription_id of this FlexSubscriptionResponse.
Identifier of SmartSense subscription Cloudbreak domain object json representation.
:param smart_sense_subscription_id: The smart_sense_subscription_id of this FlexSubscriptionResponse.
:type: int
"""
self._smart_sense_subscription_id = smart_sense_subscription_id
@property
def used_as_default(self):
"""
Gets the used_as_default of this FlexSubscriptionResponse.
true if the flex subscription is the default one
:return: The used_as_default of this FlexSubscriptionResponse.
:rtype: bool
"""
return self._used_as_default
@used_as_default.setter
def used_as_default(self, used_as_default):
"""
Sets the used_as_default of this FlexSubscriptionResponse.
true if the flex subscription is the default one
:param used_as_default: The used_as_default of this FlexSubscriptionResponse.
:type: bool
"""
self._used_as_default = used_as_default
@property
def used_for_controller(self):
"""
Gets the used_for_controller of this FlexSubscriptionResponse.
true if the flex subscription was used for the controller
:return: The used_for_controller of this FlexSubscriptionResponse.
:rtype: bool
"""
return self._used_for_controller
@used_for_controller.setter
def used_for_controller(self, used_for_controller):
"""
Sets the used_for_controller of this FlexSubscriptionResponse.
true if the flex subscription was used for the controller
:param used_for_controller: The used_for_controller of this FlexSubscriptionResponse.
:type: bool
"""
self._used_for_controller = used_for_controller
@property
def id(self):
"""
Gets the id of this FlexSubscriptionResponse.
id of the resource
:return: The id of this FlexSubscriptionResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this FlexSubscriptionResponse.
id of the resource
:param id: The id of this FlexSubscriptionResponse.
:type: int
"""
self._id = id
@property
def owner(self):
"""
Gets the owner of this FlexSubscriptionResponse.
id of the resource owner that is provided by OAuth provider
:return: The owner of this FlexSubscriptionResponse.
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this FlexSubscriptionResponse.
id of the resource owner that is provided by OAuth provider
:param owner: The owner of this FlexSubscriptionResponse.
:type: str
"""
self._owner = owner
@property
def account(self):
"""
Gets the account of this FlexSubscriptionResponse.
account id of the resource owner that is provided by OAuth provider
:return: The account of this FlexSubscriptionResponse.
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this FlexSubscriptionResponse.
account id of the resource owner that is provided by OAuth provider
:param account: The account of this FlexSubscriptionResponse.
:type: str
"""
self._account = account
@property
def public_in_account(self):
"""
Gets the public_in_account of this FlexSubscriptionResponse.
resource is visible in account
:return: The public_in_account of this FlexSubscriptionResponse.
:rtype: bool
"""
return self._public_in_account
@public_in_account.setter
def public_in_account(self, public_in_account):
"""
Sets the public_in_account of this FlexSubscriptionResponse.
resource is visible in account
:param public_in_account: The public_in_account of this FlexSubscriptionResponse.
:type: bool
"""
self._public_in_account = public_in_account
@property
def smart_sense_subscription(self):
"""
Gets the smart_sense_subscription of this FlexSubscriptionResponse.
The associated SmartSense subscription Cloudbreak domain object json representation.
:return: The smart_sense_subscription of this FlexSubscriptionResponse.
:rtype: SmartSenseSubscriptionJson
"""
return self._smart_sense_subscription
@smart_sense_subscription.setter
def smart_sense_subscription(self, smart_sense_subscription):
"""
Sets the smart_sense_subscription of this FlexSubscriptionResponse.
The associated SmartSense subscription Cloudbreak domain object json representation.
:param smart_sense_subscription: The smart_sense_subscription of this FlexSubscriptionResponse.
:type: SmartSenseSubscriptionJson
"""
self._smart_sense_subscription = smart_sense_subscription
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FlexSubscriptionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
nicproulx/mne-python
|
refs/heads/placeholder
|
examples/io/plot_read_epochs.py
|
15
|
"""
==================================
Reading epochs from a raw FIF file
==================================
This script shows how to read the epochs from a raw file given
a list of events. For illustration, we compute the evoked responses
for both MEG and EEG data by averaging all the epochs.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
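# reject: drop epochs whose peak-to-peak amplitude exceeds these thresholds
# (grad in T/m, mag in T, eog in V)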
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average() # average epochs to get the evoked response
###############################################################################
# Show result
evoked.plot()
|
Eigenstate/msmbuilder
|
refs/heads/master
|
msmbuilder/example_datasets/brownian1d.py
|
7
|
"""Very simple datasets of brownian dynamics in one dimension."""
# Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
import time
import numpy as np
from .base import _NWell
from ..msm import _solve_msm_eigensystem
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
# DO NOT CHANGE THESE CONSTANTS WITHOUT UPDATING THE
# "DOUBLEWELL_DESCRIPTION" VARIABLE
DIFFUSION_CONST = 1e3
DT = 1e-3
DT_SQRT_2D = DT * np.sqrt(2 * DIFFUSION_CONST)
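# Noise prefactor used by the propagation and transition-matrix code below
# (DT * sqrt(2 * D)).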
__all__ = ['load_doublewell', 'load_quadwell',
'doublewell_eigs', 'quadwell_eigs']
# -----------------------------------------------------------------------------
# User functions
# -----------------------------------------------------------------------------
class DoubleWell(_NWell):
r"""Brownian dynamics on a 1D double well potential
Parameters
----------
data_home : optional, default: None
Specify another cache folder for the datasets. By default
all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
random_state : {int, None}, default: None
        Seed the pseudorandom number generator to generate trajectories. If
seed is None, the global numpy PRNG is used. If random_state is an
int, the simulations will be cached in ``data_home``, or loaded from
``data_home`` if simulations with that seed have been performed already.
With random_state=None, new simulations will be performed and the
trajectories will not be cached.
Notes
-----
This dataset consists of 10 trajectories simulated with Brownian dynamics on
the reduced potential function
V(x) = 1 + cos(2x)
with reflecting boundary conditions at x=-pi and x=pi. The simulations
are governed by the stochastic differential equation
dx_t/dt = -\nabla V(x) + \sqrt{2D} * R(t),
where R(t) is a standard normal white-noise process, and D=1e3. The
    timestep is 1e-3. Each trajectory is 10^5 steps long, and starts at
x_0 = 0.
"""
target_name = "doublewell"
n_trajectories = 10
def simulate_func(self, random):
return _simulate_doublewell(random)
def potential(self, x):
return 1 + np.cos(2 * x)
def load_doublewell(data_home=None, random_state=None):
return DoubleWell(data_home, random_state).get()
load_doublewell.__doc__ = DoubleWell.__doc__
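# Illustrative usage (not part of the original module; assumes the Bunch
# return value used throughout msmbuilder.example_datasets):
#     >>> bunch = load_doublewell(random_state=0)
#     >>> len(bunch.trajectories)
#     10
#     >>> bunch.trajectories[0].shape
#     (100001, 1)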
class QuadWell(_NWell):
r"""Brownian dynamics on a 1D four well potential
Parameters
----------
data_home : optional, default: None
Specify another cache folder for the datasets. By default
all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
random_state : {int, None}, default: None
        Seed the pseudorandom number generator to generate trajectories. If
seed is None, the global numpy PRNG is used. If random_state is an
int, the simulations will be cached in ``data_home``, or loaded from
``data_home`` if simulations with that seed have been performed already.
With random_state=None, new simulations will be performed and the
trajectories will not be cached.
Notes
-----
This dataset consists of 100 trajectories simulated with Brownian dynamics
on the reduced potential function
V = 4(x^8 + 0.8 exp(-80 x^2) + 0.2 exp(-80 (x-0.5)^2) + 0.5 exp(-40 (x+0.5)^2)).
The simulations are governed by the stochastic differential equation
dx_t/dt = -\nabla V(x) + \sqrt{2D} * R(t),
    where R(t) is a standard normal white-noise process, and D=1e3. The timestep
is 1e-3. Each trajectory is 10^3 steps long, and starts from a random
initial point sampled from the uniform distribution on [-1, 1].
"""
target_name = "quadwell"
n_trajectories = 100
def simulate_func(self, random):
return _simulate_quadwell(random)
def potential(self, x):
        return 4 * (x ** 8
                    + 0.8 * np.exp(-80 * x ** 2)
                    + 0.2 * np.exp(-80 * (x - 0.5) ** 2)
                    + 0.5 * np.exp(-40 * (x + 0.5) ** 2))
def load_quadwell(data_home=None, random_state=None):
return QuadWell(data_home, random_state).get()
load_quadwell.__doc__ = QuadWell.__doc__
def doublewell_eigs(n_grid, lag_time=1):
"""Analytic eigenvalues/eigenvectors for the doublwell system
TODO: DOCUMENT ME
"""
return _brownian_eigs(n_grid, lag_time, DOUBLEWELL_GRAD_POTENTIAL,
-np.pi, np.pi, reflect_bc=True)
def quadwell_eigs(n_grid, lag_time=1):
"""Analytic eigenvalues/eigenvectors for the quadwell system
TODO: DOCUMENT ME
"""
return _brownian_eigs(n_grid, lag_time, QUADWELL_GRAD_POTENTIAL,
-1.2, 1.2, reflect_bc=False)
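# Illustrative sketch (not part of the original module; assumes the helpers
# above return eigenvalues sorted in decreasing order with the stationary
# eigenvalue first):
#     >>> u, rv = doublewell_eigs(n_grid=200)
#     >>> timescales = -1.0 / np.log(u[1:])   # implied timescales, in steps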
# -----------------------------------------------------------------------------
# Internal functions
# -----------------------------------------------------------------------------
DOUBLEWELL_GRAD_POTENTIAL = lambda x: -2 * np.sin(2 * x)
QUADWELL_GRAD_POTENTIAL = lambda x: 4 * (
    8 * x ** 7
    - 128 * x * np.exp(-80 * x ** 2)
    - 32 * (x - 0.5) * np.exp(-80 * (x - 0.5) ** 2)
    - 40 * (x + 0.5) * np.exp(-40 * (x + 0.5) ** 2))
def _simulate_doublewell(random):
# DO NOT CHANGE THESE CONSTANTS WITHOUT UPDATING THE
# "DOUBLEWELL_DESCRIPTION" VARIABLE AND UPDATING THE VERSION NUMBER
# in the load_doublewell FUNCTION
x0 = 0
n_steps = 1e5
n_trajectories = 10
trajectories = [_propagate1d(
x0, n_steps, DOUBLEWELL_GRAD_POTENTIAL, random, bc_min=-np.pi,
bc_max=np.pi, verbose=True).reshape(-1, 1)
for i in range(n_trajectories)]
return trajectories
def _simulate_quadwell(random):
# DO NOT CHANGE THESE CONSTANTS WITHOUT UPDATING THE
# "QUADWELL_DESCRIPTION" VARIABLE AND UPDATING THE VERSION NUMBER
# in the load_quadwell FUNCTION
n_steps = 1e3
n_trajectories = 100
x0 = random.uniform(-1, 1, size=n_trajectories)
trajectories = [_propagate1d(
x0[i], n_steps, QUADWELL_GRAD_POTENTIAL,
random=random, verbose=False).reshape(-1, 1)
for i in range(n_trajectories)]
return trajectories
def _reflect_boundary_conditions(x, min, max):
if x > max:
return 2 * max - x
if x < min:
return 2 * min - x
return x
def _propagate1d(x0, n_steps, grad_potential, random, bc_min=None, bc_max=None,
verbose=True):
start = time.time()
n_steps = int(n_steps)
if bc_min is None and bc_max is None:
bc = lambda x: x
else:
bc = lambda x: _reflect_boundary_conditions(x, bc_min, bc_max)
rand = random.randn(n_steps)
x = np.zeros(n_steps + 1)
x[0] = x0
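    # Forward Euler propagation of the overdamped dynamics: at each step the
    # particle drifts down the potential gradient and receives a Gaussian kick
    # scaled by DT_SQRT_2D; bc() applies the reflecting boundaries when set.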
for i in range(n_steps):
x_i_plus_1 = x[i] - DT * grad_potential(x[i]) + DT_SQRT_2D * rand[i]
x[i + 1] = bc(x_i_plus_1)
if verbose:
print('%d steps/s' % (n_steps / (time.time() - start)))
return x
def _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
ONE_OVER_SQRT_2PI = 1.0 / (np.sqrt(2 * np.pi))
normalpdf = lambda x: ONE_OVER_SQRT_2PI * np.exp(-0.5 * (x * x))
grid = np.linspace(xmin, xmax, n_grid)
width = grid[1] - grid[0]
transmat = np.zeros((n_grid, n_grid))
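    # Discretize the one-step propagator on the grid: the probability of moving
    # from x_i to x_j is the Gaussian density of the corresponding Brownian
    # increment; with reflect_bc, mass that would leave the domain is folded
    # back onto the mirrored grid index. Rows are normalized below, and the
    # matrix is raised to the requested lag time.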
for i, x_i in enumerate(grid):
if reflect_bc:
for offset in range(-(n_grid - 1), n_grid):
x_j = x_i + (offset * width)
j = _reflect_boundary_conditions(i + offset, 0, n_grid - 1)
# What is the probability of going from x_i to x_j in one step?
diff = (x_j - x_i + DT * grad_potential(x_i)) / DT_SQRT_2D
transmat[i, j] += normalpdf(diff)
else:
for j, x_j in enumerate(grid):
# What is the probability of going from x_i to x_j in one step?
diff = (x_j - x_i + DT * grad_potential(x_i)) / DT_SQRT_2D
transmat[i, j] += normalpdf(diff)
transmat[i, :] = transmat[i, :] / np.sum(transmat[i, :])
transmat = np.linalg.matrix_power(transmat, lag_time)
return transmat
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
"""Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
"""
transmat = _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc)
u, lv, rv = _solve_msm_eigensystem(transmat, k=len(transmat) - 1)
return u, rv
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/DateTodayReturnType/a.py
|
83
|
from datetime import date
print(date.today().strftime('%y'))
|
rven/odoo
|
refs/heads/14.0-fix-partner-merge-mail-activity
|
addons/account_edi_ubl/__init__.py
|
1262
|
from . import models
|
coala/corobo
|
refs/heads/master
|
plugins/constants.py
|
1
|
API_DOCS = 'https://api.coala.io/en/latest'
USER_DOCS = 'https://docs.coala.io/en/latest'
MAX_MSG_LEN = 1000
MAX_LINES = 20
PRIVATE_CMDS = ['assign_cmd', 'create_issue_cmd', 'invite_cmd', 'mark_cmd',
'pr_stats', 'unassign_cmd', 'pitchfork', 'the_rules', 'wa',
'answer', 'lmgtfy', 'ghetto', 'explain', 'nevermind']
|
sloanyang/aquantic
|
refs/heads/master
|
Tools/Scripts/webkitpy/tool/grammar.py
|
217
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def plural(noun):
# This is a dumb plural() implementation that is just enough for our uses.
if re.search("h$", noun):
return noun + "es"
else:
return noun + "s"
def pluralize(noun, count):
if count != 1:
noun = plural(noun)
return "%d %s" % (count, noun)
def join_with_separators(list_of_strings, separator=', ', only_two_separator=" and ", last_separator=', and '):
if not list_of_strings:
return ""
if len(list_of_strings) == 1:
return list_of_strings[0]
if len(list_of_strings) == 2:
return only_two_separator.join(list_of_strings)
return "%s%s%s" % (separator.join(list_of_strings[:-1]), last_separator, list_of_strings[-1])
|
jianjian0dandan/Zhihu_Spider
|
refs/heads/master
|
zhihu/zhihu/pipelines.py
|
4
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.contrib.exporter import JsonLinesItemExporter, JsonItemExporter, XmlItemExporter
from zhihu.items import ZhihuItem, ZhiHuA, ZhiHuQ, ZhiHuU
from scrapy import signals
import codecs
class ZhihuPipeline(object):
def __init__(self):
self.files_path = {}
self.files = {}
self.exporters = {}
self.files_path['user_file'] = './zhihu_user.json'
self.files_path['question_file'] = './zhihu_q.json'
self.files_path['answer_file'] = './zhihu_a.json'
def open_spider(self, spider):
print 'Opening spider.'
self.files['question'] = codecs.open(self.files_path['question_file'], 'w', encoding='utf-8')
self.files['answer'] = codecs.open(self.files_path['answer_file'], 'w', encoding='utf-8')
self.files['user'] = codecs.open(self.files_path['user_file'], 'w', encoding='utf-8')
self.exporters['question'] = JsonItemExporter(self.files['question'])
self.exporters['answer'] = JsonItemExporter(self.files['answer'])
self.exporters['user'] = JsonItemExporter(self.files['user'])
for exporter in self.exporters.itervalues():
exporter.start_exporting()
def close_spider(self, spider):
print 'Closing spider'
for exporter in self.exporters.itervalues():
exporter.finish_exporting()
for opened_file in self.files.itervalues():
opened_file.close()
def process_item(self, item, spider):
if isinstance(item, ZhiHuQ):
# print 'It is question'
self.exporters['question'].export_item(item)
elif isinstance(item, ZhiHuA):
# print 'It is answer'
self.exporters['answer'].export_item(item)
elif isinstance(item, ZhiHuU):
# print 'It is user'
self.exporters['user'].export_item(item)
else:
pass
return item
|
jiachenning/odoo
|
refs/heads/8.0
|
addons/base_report_designer/wizard/base_report_designer_modify.py
|
314
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
import urllib
from openerp import osv, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class base_report_sxw(osv.osv_memory):
"""Base Report sxw """
_name = 'base.report.sxw'
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", required=True,domain=[('report_sxw_content','<>',False)],),
}
def get_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_sxw')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.file.sxw',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_file_sxw(osv.osv_memory):
"""Base Report File sxw """
_name = 'base.report.file.sxw'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields mapped to their default values.
"""
res = super(base_report_file_sxw, self).default_get(cr, uid, fields, context=context)
report_id1 = self.pool['base.report.sxw'].search(cr,uid,[])
data = self.pool['base.report.sxw'].read(cr, uid, report_id1, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if context is None:
context={}
if 'report_id' in fields:
res['report_id'] = data['report_id']
res['file_sxw'] = base64.encodestring(report.report_sxw_content)
return res
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", readonly=True),
'file_sxw':fields.binary('Your .SXW file',readonly=True),
'file_sxw_upload':fields.binary('Your .SXW file',required=True)
}
def upload_report(self, cr, uid, ids, context=None):
from base_report_designer import openerp_sxw2rml
import StringIO
data=self.read(cr,uid,ids)[0]
sxwval = StringIO.StringIO(base64.decodestring(data['file_sxw_upload']))
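        # Convert the uploaded OpenOffice .sxw document to RML using the bundled
        # normalized_oo2rml.xsl stylesheet, then store both representations on
        # the selected report.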
fp = tools.file_open('normalized_oo2rml.xsl',subdir='addons/base_report_designer/openerp_sxw2rml')
newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
report = self.pool['ir.actions.report.xml'].write(cr, uid, [data['report_id']], {
'report_sxw_content': base64.decodestring(data['file_sxw_upload']),
'report_rml_content': newrmlcontent
})
cr.commit()
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_rml')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.rml.save',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_rml_save(osv.osv_memory):
"""Base Report file Save"""
_name = 'base.report.rml.save'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields mapped to their default values.
"""
res = super(base_report_rml_save, self).default_get(cr, uid, fields, context=context)
report_ids = self.pool['base.report.sxw'].search(cr,uid,[], context=context)
data = self.pool['base.report.file.sxw'].read(cr, uid, report_ids, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if 'file_rml' in fields:
res['file_rml'] = base64.encodestring(report.report_rml_content)
return res
_columns = {
'file_rml':fields.binary('Save As'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
indykish/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/serve.py
|
164
|
#!/usr/bin/env python
from tools.serve import serve
def main():
    serve.main()
if __name__ == "__main__":
    main()
|
adrienbrault/home-assistant
|
refs/heads/dev
|
homeassistant/components/cloud/client.py
|
3
|
"""Interface implementation for cloud client."""
from __future__ import annotations
import asyncio
import logging
from pathlib import Path
from typing import Any
import aiohttp
from hass_nabucasa.client import CloudClient as Interface
from homeassistant.components.alexa import (
errors as alexa_errors,
smart_home as alexa_sh,
)
from homeassistant.components.google_assistant import const as gc, smart_home as ga
from homeassistant.const import HTTP_OK
from homeassistant.core import Context, HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from homeassistant.util.aiohttp import MockRequest
from . import alexa_config, google_config, utils
from .const import DISPATCHER_REMOTE_UPDATE, DOMAIN
from .prefs import CloudPreferences
class CloudClient(Interface):
"""Interface class for Home Assistant Cloud."""
def __init__(
self,
hass: HomeAssistant,
prefs: CloudPreferences,
websession: aiohttp.ClientSession,
alexa_user_config: dict[str, Any],
google_user_config: dict[str, Any],
):
"""Initialize client interface to Cloud."""
self._hass = hass
self._prefs = prefs
self._websession = websession
self.google_user_config = google_user_config
self.alexa_user_config = alexa_user_config
self._alexa_config = None
self._google_config = None
@property
def base_path(self) -> Path:
"""Return path to base dir."""
return Path(self._hass.config.config_dir)
@property
def prefs(self) -> CloudPreferences:
"""Return Cloud preferences."""
return self._prefs
@property
def loop(self) -> asyncio.BaseEventLoop:
"""Return client loop."""
return self._hass.loop
@property
def websession(self) -> aiohttp.ClientSession:
"""Return client session for aiohttp."""
return self._websession
@property
def aiohttp_runner(self) -> aiohttp.web.AppRunner:
"""Return client webinterface aiohttp application."""
return self._hass.http.runner
@property
def cloudhooks(self) -> dict[str, dict[str, str]]:
"""Return list of cloudhooks."""
return self._prefs.cloudhooks
@property
def remote_autostart(self) -> bool:
"""Return true if we want start a remote connection."""
return self._prefs.remote_enabled
async def get_alexa_config(self) -> alexa_config.AlexaConfig:
"""Return Alexa config."""
if self._alexa_config is None:
assert self.cloud is not None
cloud_user = await self._prefs.get_cloud_user()
self._alexa_config = alexa_config.AlexaConfig(
self._hass, self.alexa_user_config, cloud_user, self._prefs, self.cloud
)
return self._alexa_config
async def get_google_config(self) -> google_config.CloudGoogleConfig:
"""Return Google config."""
if not self._google_config:
assert self.cloud is not None
cloud_user = await self._prefs.get_cloud_user()
self._google_config = google_config.CloudGoogleConfig(
self._hass, self.google_user_config, cloud_user, self._prefs, self.cloud
)
await self._google_config.async_initialize()
return self._google_config
async def logged_in(self) -> None:
"""When user logs in."""
is_new_user = await self.prefs.async_set_username(self.cloud.username)
async def enable_alexa(_):
"""Enable Alexa."""
aconf = await self.get_alexa_config()
try:
await aconf.async_enable_proactive_mode()
except aiohttp.ClientError as err: # If no internet available yet
if self._hass.is_running:
logging.getLogger(__package__).warning(
"Unable to activate Alexa Report State: %s. Retrying in 30 seconds",
err,
)
async_call_later(self._hass, 30, enable_alexa)
except alexa_errors.NoTokenAvailable:
pass
async def enable_google(_):
"""Enable Google."""
gconf = await self.get_google_config()
gconf.async_enable_local_sdk()
if gconf.should_report_state:
gconf.async_enable_report_state()
if is_new_user:
await gconf.async_sync_entities(gconf.agent_user_id)
tasks = []
if self._prefs.alexa_enabled and self._prefs.alexa_report_state:
tasks.append(enable_alexa)
if self._prefs.google_enabled:
tasks.append(enable_google)
if tasks:
await asyncio.gather(*[task(None) for task in tasks])
async def cleanups(self) -> None:
"""Cleanup some stuff after logout."""
await self.prefs.async_set_username(None)
self._google_config = None
@callback
def user_message(self, identifier: str, title: str, message: str) -> None:
"""Create a message for user to UI."""
self._hass.components.persistent_notification.async_create(
message, title, identifier
)
@callback
def dispatcher_message(self, identifier: str, data: Any = None) -> None:
"""Match cloud notification to dispatcher."""
if identifier.startswith("remote_"):
async_dispatcher_send(self._hass, DISPATCHER_REMOTE_UPDATE, data)
async def async_alexa_message(self, payload: dict[Any, Any]) -> dict[Any, Any]:
"""Process cloud alexa message to client."""
cloud_user = await self._prefs.get_cloud_user()
aconfig = await self.get_alexa_config()
return await alexa_sh.async_handle_message(
self._hass,
aconfig,
payload,
context=Context(user_id=cloud_user),
enabled=self._prefs.alexa_enabled,
)
async def async_google_message(self, payload: dict[Any, Any]) -> dict[Any, Any]:
"""Process cloud google message to client."""
if not self._prefs.google_enabled:
return ga.turned_off_response(payload)
gconf = await self.get_google_config()
return await ga.async_handle_message(
self._hass, gconf, gconf.cloud_user, payload, gc.SOURCE_CLOUD
)
async def async_webhook_message(self, payload: dict[Any, Any]) -> dict[Any, Any]:
"""Process cloud webhook message to client."""
cloudhook_id = payload["cloudhook_id"]
found = None
for cloudhook in self._prefs.cloudhooks.values():
if cloudhook["cloudhook_id"] == cloudhook_id:
found = cloudhook
break
if found is None:
return {"status": HTTP_OK}
request = MockRequest(
content=payload["body"].encode("utf-8"),
headers=payload["headers"],
method=payload["method"],
query_string=payload["query"],
mock_source=DOMAIN,
)
response = await self._hass.components.webhook.async_handle_webhook(
found["webhook_id"], request
)
response_dict = utils.aiohttp_serialize_response(response)
body = response_dict.get("body")
return {
"body": body,
"status": response_dict["status"],
"headers": {"Content-Type": response.content_type},
}
async def async_cloudhooks_update(self, data: dict[str, dict[str, str]]) -> None:
"""Update local list of cloudhooks."""
await self._prefs.async_update(cloudhooks=data)
|
gnotaras/django-postgresql-manager
|
refs/heads/master
|
example/testproject/urls.py
|
5
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'testproject.views.home', name='home'),
# url(r'^testproject/', include('testproject.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|