code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Generated PyAF smoke test: build exactly one model combination against the
# ozone test harness (transformation=Anscombe, trend=MovingAverage,
# cycle=NoCycle, autoregression=SVR).
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['MovingAverage'] , ['NoCycle'] , ['SVR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingAverage_NoCycle_SVR.py
|
Python
|
bsd-3-clause
| 153
|
# Packaging script for pyRedisBridge.
from setuptools import setup

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as req_file:
    requirements = req_file.read().splitlines()

setup(
    name='pyRedisBridge',
    version='0.0.8',
    license='MIT',
    url='https://github.com/sourceperl/pyRedisBridge',
    platforms='any',
    install_requires=requirements,
    scripts=[
        'redis_serial_sync'
    ]
)
|
sourceperl/pyRedisBridge
|
setup.py
|
Python
|
mit
| 334
|
# This file is generated by /tmp/pip-build-JIErOP/scipy/-c
# It contains system_info results at the time of building this package.
# NOTE: auto-generated by the build system (numpy.distutils style) — do not
# edit by hand. An empty dict means that BLAS/LAPACK variant was not found
# at build time; a populated dict records the configuration actually linked.
__all__ = ["get_info","show"]
atlas_3_10_blas_info={}
atlas_3_10_blas_threads_info={}
atlas_threads_info={'libraries': ['lapack', 'ptf77blas', 'ptcblas', 'atlas'], 'library_dirs': ['/usr/lib64/atlas-sse3'], 'language': 'f77', 'define_macros': [('NO_ATLAS_INFO', -1)], 'include_dirs': ['/usr/include']}
blas_opt_info={'libraries': ['ptf77blas', 'ptcblas', 'atlas'], 'library_dirs': ['/usr/lib64/atlas-sse3'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('NO_ATLAS_INFO', -1)], 'include_dirs': ['/usr/include']}
atlas_blas_threads_info={'libraries': ['ptf77blas', 'ptcblas', 'atlas'], 'library_dirs': ['/usr/lib64/atlas-sse3'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('NO_ATLAS_INFO', -1)], 'include_dirs': ['/usr/include']}
openblas_info={}
lapack_opt_info={'libraries': ['lapack', 'ptf77blas', 'ptcblas', 'atlas'], 'library_dirs': ['/usr/lib64/atlas-sse3'], 'language': 'f77', 'define_macros': [('NO_ATLAS_INFO', -1)], 'include_dirs': ['/usr/include']}
openblas_lapack_info={}
atlas_3_10_threads_info={}
atlas_3_10_info={}
lapack_mkl_info={}
blas_mkl_info={}
mkl_info={}
def get_info(name):
    """Return the recorded build-time configuration dict for *name*.

    Falls back to looking up ``name + "_info"``; a completely unknown
    name yields an empty dict.
    """
    namespace = globals()
    if name in namespace:
        return namespace[name]
    return namespace.get(name + "_info", {})
def show():
    """Print every recorded *_info configuration dict to stdout.

    Names starting with '_' and non-dict module globals are skipped.
    Empty dicts are reported as NOT AVAILABLE.
    """
    for name,info_dict in globals().items():
        # Only module-level plain-dict entries are configuration records.
        if name[0] == "_" or type(info_dict) is not type({}): continue
        print(name + ":")
        if not info_dict:
            print(" NOT AVAILABLE")
        for k,v in info_dict.items():
            v = str(v)
            # Elide very long source lists to keep the output readable.
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print(" %s = %s" % (k,v))
|
ryfeus/lambda-packs
|
Sklearn_scipy_numpy/source/scipy/__config__.py
|
Python
|
mit
| 1,747
|
from .tilegrids import GeoadminTileGridLV03, GeoadminTileGridLV95, \
GlobalMercatorTileGrid, GlobalGeodeticTileGrid
from .grid import Grid
def getTileGrid(srs):
    """Return the tile grid class matching the given EPSG code.

    Supported codes: 21781 (LV03), 2056 (LV95), 3857 (Web Mercator),
    4326 (WGS84 geodetic). Any other code raises AssertionError.
    """
    assert srs in (21781, 2056, 3857, 4326), 'Unsupported tile grid'
    grid_by_epsg = {
        21781: GeoadminTileGridLV03,
        2056: GeoadminTileGridLV95,
        3857: GlobalMercatorTileGrid,
        4326: GlobalGeodeticTileGrid,
    }
    return grid_by_epsg[srs]
|
loicgasser/gatilegrid
|
gatilegrid/__init__.py
|
Python
|
mit
| 471
|
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009-2012, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import taboot
import sys
import tempfile
from argparse import ArgumentParser, ArgumentTypeError
from errors import TabootTaskNotFoundException
from os.path import isfile
from taboot.log import *
def resolve_types(ds, relative_to='taboot.tasks'):
    """
    Recursively translate string representation of a type within a
    datastructure into an actual type instance.

    :Parameters:
       - `ds`: An arbitrary datastructure. Within `ds`, if a dict key named
         `type` is encountered, the string contained there is replaced with the
         actual type named.
       - `relative_to`: The prefix which types are relative to; used during
         import. As an example, if `relative_to`='taboot.tasks' and `ds`
         contains a `type` key `command.Run`, then the type is imported as
         `taboot.tasks.command.Run`.

    Raises TabootTaskNotFoundException when a dotted type name cannot be
    imported or resolved.
    """
    __import__(relative_to)
    if isinstance(ds, list):
        return [resolve_types(item, relative_to) for item in ds]
    elif isinstance(ds, dict):
        result = {}
        # FIX: .iteritems() is Python-2-only; .items() behaves identically on
        # Python 2 (builds a list) and keeps the code working on Python 3.
        for k, v in ds.items():
            if k == 'type':
                tokens = v.split('.')
                if len(tokens) == 1:
                    # Bare name: attribute of the base module itself.
                    result[k] = getattr(sys.modules[relative_to], tokens[0])
                else:
                    pkg = "%s.%s" % (relative_to, tokens[0])
                    the_task = ".".join([pkg, tokens[1]])
                    try:
                        __import__(pkg)
                        result[k] = getattr(sys.modules[pkg], tokens[1])
                    except (AttributeError, ImportError):
                        raise TabootTaskNotFoundException(the_task)
            else:
                result[k] = resolve_types(v, relative_to)
        return result
    else:
        # Scalars (and anything else) pass through unchanged.
        return ds
def instantiator(type_blob, relative_to="taboot.tasks", **kwargs):
    """
    Instantiate a type, which is defined by a type blob in the
    following format:

      - If no paremeters are required for the type, then the blob
        should be a single string describing the desired type
      - If parameters are required, then the type blob must be a
        dictionary with only one key that is a string describing
        the desired type. The value associated with this key
        should be dictionary which maps the parameter:value pairs
        required when instantiating the type.

    Returns the instantiated object.
    """
    __import__(relative_to)

    # Compatibility shim: basestring exists only on Python 2.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    def str2type(s):
        # Resolve a (possibly dotted) name relative to `relative_to`.
        import sys
        tokens = s.split('.')
        if len(tokens) == 1:
            return getattr(sys.modules[relative_to], tokens[0])
        else:
            pkg = "%s.%s" % (relative_to, tokens[0])
            try:
                __import__(pkg)
                task = getattr(sys.modules[pkg], tokens[1])
            except (AttributeError, ImportError):
                missing_task = ".".join([pkg, tokens[1]])
                raise TabootTaskNotFoundException(missing_task)
            return task

    if isinstance(type_blob, string_types):
        instance_type = str2type(type_blob)
    else:
        # FIX: dict.keys() is not indexable on Python 3; materialize once.
        keys = list(type_blob.keys())
        if len(keys) != 1:
            raise Exception("Number of keys isn't 1")
        instance_type = str2type(keys[0])
        kwargs.update(type_blob[keys[0]])
    try:
        return instance_type(**kwargs)
    # FIX: 'except TypeError, e' is a syntax error on Python 3; the bound
    # exception was never used, so drop the binding ('as' works on py2.6+).
    except TypeError:
        import pprint
        log_error("Unable to instantiate %s with the following arguments:",
                  instance_type)
        pprint.pprint(kwargs)
        log_error("Full backtrace below\n")
        raise
def make_blob_copy(blob):
    """
    Concat the header with the given blob to edit into a temporary
    file.

    Returns a tuple ``(tmpfile, offset)`` where ``tmpfile`` is the still-open
    NamedTemporaryFile object (not just its name) and ``offset`` is the line
    to position the editor cursor at (just past the header).
    """
    # taboot.edit_header points at an optional banner template shown above
    # the editable YAML; a missing header is tolerated (best effort).
    if isfile(taboot.edit_header):
        header = open(taboot.edit_header).read()
        # Cursor offset = number of header lines, so the editor opens below it.
        offset = len(header.split("\n"))
        log_debug("Header file is %s lines long", offset)
    else:
        log_warn("Header file not found when launching Taboot edit mode!")
        log_warn("Expected to find: %s", taboot.edit_header)
        header = ""
        offset = 0
    # Keep the handle open and return it — the file must outlive this call;
    # cleanup happens via sync_blob_copy()/close() later.
    tmpfile = tempfile.NamedTemporaryFile(suffix=".yaml",
                                          prefix="taboot-")
    # Substitute after creation so the header can reference its own tmp path.
    header = header.replace("$TMPFILE$", tmpfile.name)
    # NOTE(review): writes str to the default 'w+b' handle — valid on
    # Python 2 only, consistent with the rest of this (py2) module.
    tmpfile.write(header)
    tmpfile.write(blob)
    tmpfile.flush()
    return (tmpfile, offset)
def sync_blob_copy(tmpfile):
    """
    For backwards compatibility we copy the blob back manually to
    tmpfile. NamedTemporaryFile didn't support the 'delete' parameter
    until py2.6.

    Closes *tmpfile* (which erases it), recreates a file with the same
    name holding the same content, and returns that content.
    """
    # FIX: the original leaked both file handles; 'with' guarantees closure.
    with open(tmpfile.name) as blob_file:
        blob = blob_file.read()
    tmpname = tmpfile.name
    tmpfile.close()  # The file is erased when close()'d
    with open(tmpname, 'w') as out_file:
        out_file.write(blob)
    return blob
def flatten(x):
    """
    Flatten an arbitrary depth nested list.

    Strings/bytes are treated as atoms, not as iterables.
    """
    # Lifted from: http://stackoverflow.com/a/406822/263969
    # FIX: 'basestring' does not exist on Python 3. (str, bytes) is
    # equivalent on Python 2 (py2 str/unicode define no __iter__, so the
    # hasattr guard already excluded them) and prevents infinite recursion
    # into single characters on Python 3, where str is iterable.
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, (str, bytes)):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
def print_stderr(msg):
    """Write *msg* verbatim to stderr and flush immediately (no newline added)."""
    stream = sys.stderr
    stream.write(msg)
    stream.flush()
def parse_int_or_all(arg):
    """
    Custom ArgumentParser type which accept integers and 'all' as
    arguments to the `concurrency` parameter.

    Returns the matched string (digits, or 'all' in its original case);
    raises ArgumentTypeError for anything else.
    """
    match = re.match(r'^((\d+)|(all))$', arg, re.IGNORECASE)
    if not match:
        # FIX: the original used a backslash line-continuation inside the
        # string literal, which leaks any source indentation into the
        # user-facing message; implicit concatenation is safe.
        raise ArgumentTypeError("'" + arg + "' is not a valid value. "
                                "Expecting an integer or 'all'.")
    return match.group(1)
|
tbielawa/Taboot
|
taboot/util.py
|
Python
|
gpl-3.0
| 6,426
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
i_evapo_mh.py
-------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import absolute_import
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .i import verifyRasterNum
def checkParameterValuesBeforeExecuting(alg):
    """Validate that the -h flag and the precipitation raster are mutually
    exclusive but that exactly one of them is supplied.

    Returns a translated error message on invalid input, None when valid.
    """
    has_hargreaves_flag = alg.getParameterValue('-h')
    has_precipitation = alg.getParameterValue('precipitation')
    if has_hargreaves_flag and has_precipitation:
        return alg.tr('You can\'t use original Hargreaves flag and precipitation parameter together!')
    if not has_hargreaves_flag and not has_precipitation:
        return alg.tr('If you don\'t use original Hargreaves flag, you must set the precipitation raster parameter!')
    return None
|
drnextgis/QGIS
|
python/plugins/processing/algs/grass7/ext/i_evapo_mh.py
|
Python
|
gpl-2.0
| 1,626
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-02 03:54
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10.4): adds profile detail fields.

    All added columns are null=True so the migration applies cleanly to
    existing rows without requiring defaults. Generated code — avoid
    editing by hand; create a new migration instead.
    """

    dependencies = [
        ('profiles', '0002_profile_homeaddress'),
    ]

    operations = [
        # JSON blobs for linked accounts and shipping address.
        migrations.AddField(
            model_name='profile',
            name='accounts',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='city',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='country',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='county',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='first_name',
            field=models.CharField(max_length=200, null=True, verbose_name='First Name'),
        ),
        migrations.AddField(
            model_name='profile',
            name='income',
            field=models.PositiveIntegerField(null=True, verbose_name='Earned Money'),
        ),
        migrations.AddField(
            model_name='profile',
            name='last_name',
            field=models.CharField(max_length=200, null=True, verbose_name='Last Name'),
        ),
        migrations.AddField(
            model_name='profile',
            name='phone',
            field=models.CharField(max_length=20, null=True, verbose_name='Cell Phone'),
        ),
        migrations.AddField(
            model_name='profile',
            name='province',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='ship_address',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='street',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='user_type',
            field=models.CharField(max_length=20, null=True, verbose_name='User Type'),
        ),
    ]
|
sunlaiqi/fundiy
|
src/profiles/migrations/0003_auto_20170102_1154.py
|
Python
|
mit
| 2,482
|
from django.forms import ModelForm, Textarea, TextInput, HiddenInput, CharField, IntegerField, Select
from django.utils.translation import ugettext as _
from .models import Person, PersonEvent, PersonRelation, PersonAttribute, Group, GroupMember, GroupAttribute, Tag, \
PersonTag, PersonLink, GroupLink, Event, EventName, Country, Language
class PersonForm(ModelForm):
    """Edit the core biographical fields of a Person."""
    class Meta:
        model = Person
        fields = ["name", "lastname", "firstname", "gender", "birthdate", "birthplace", "deathdate", "deathplace", "infos"]
        widgets = {
            # Hint the expected date format; the model field does the parsing.
            "birthdate": TextInput(attrs={"placeholder": "YYYY-MM-DD"}),
            "infos": Textarea(attrs={"rows": 4}),
        }


class EventForm(ModelForm):
    """Edit an Event type definition (icon, uniqueness, description)."""
    class Meta:
        model = Event
        fields = ["is_unique", "icon", "description"]
        widgets = {
            "description": Textarea(attrs={"rows": 4}),
        }


class EventNameForm(ModelForm):
    """Localized display name for an Event."""
    class Meta:
        model = EventName
        fields = ["lang", "name"]


class PersonEventForm(ModelForm):
    """Attach an Event occurrence (with date/description) to a Person."""
    class Meta:
        model = PersonEvent
        fields = ["event", "date", "description"]
        widgets = {
            "description": Textarea(attrs={"rows": 4}),
        }
class PersonRelationForm(ModelForm):
    """Relation between two Persons, with on-the-fly creation of the target.

    The visible selector posts into `personto_id`/`personto_name`; the real FK
    field `personto` is hidden. Posting the sentinel value "-1" for `personto`
    means "create a new Person named `personto_name` first".
    """
    personto_id = IntegerField(label=_("Person"), widget=Select(attrs={"class": "select_person"}), required=False)
    personto_name = CharField(widget=HiddenInput(), max_length=100, required=False)

    def __init__(self, *args, **kwargs):
        # FIX: was super(ModelForm, self), which skips ModelForm in the MRO;
        # name the class itself as super() expects.
        super(PersonRelationForm, self).__init__(*args, **kwargs)
        if hasattr(self.instance, "personto"):
            self.initial["personto_id"] = self.instance.personto.id
            self.initial["personto_name"] = self.instance.personto.name
        else:
            self.initial["personto_id"] = None
            self.initial["personto_name"] = ""

    def _clean_fields(self, *args, **kwargs):
        # BUG FIX: the original read add_prefix("person"), a field this form
        # does not have (its FK field is "personto" — compare GroupMemberForm,
        # which uses its real field name "person"), so the create-new-person
        # branch could never trigger.
        value = self.data.get(self.add_prefix("personto"))
        if value == "-1":
            name = self.data.get(self.add_prefix("personto_name"))
            self.instance.personto = Person.objects.create(name=name)
            # QueryDicts are immutable; copy before patching in the new id.
            self.data = self.data.copy()
            self.data[self.add_prefix("personto")] = self.instance.personto.id
            self.data[self.add_prefix("personto_id")] = self.instance.personto.id
        super(PersonRelationForm, self)._clean_fields(*args, **kwargs)

    def save(self, commit=True):
        if self.instance.personto is None and self.cleaned_data.get("personto_id"):
            self.instance.personto = Person.objects.get(id=int(self.cleaned_data.get("personto_id")))
        # A person cannot be related to themselves.
        if hasattr(self.instance, "person") and self.instance.personto == self.instance.person:
            raise ValueError("person and personto must be different.")
        return super(PersonRelationForm, self).save(commit=commit)

    class Meta:
        model = PersonRelation
        fields = ["type", "personto_id", "personto_name", "personto", "date_start", "date_end", "infos"]
        widgets = {
            "personto": HiddenInput(),
            "infos": Textarea(attrs={"rows": 4}),
        }
class PersonTagForm(ModelForm):
    """Attach a Tag to a Person, creating the tag by name when needed."""
    tag_name = CharField(label=_("Tag name"), max_length=100)

    def __init__(self, *args, **kwargs):
        # FIX: was super(ModelForm, self); name the class itself.
        super(PersonTagForm, self).__init__(*args, **kwargs)
        self.initial["tag_name"] = ""
        # The FK is filled in save() from tag_name, so don't require it here.
        self.fields["tag"].required = False
        try:
            if self.instance.tag:
                self.initial["tag_name"] = self.instance.tag.name
        # FIX: narrowed from a bare except. An unsaved instance without a tag
        # raises RelatedObjectDoesNotExist, which derives from both of these.
        except (AttributeError, Tag.DoesNotExist):
            pass

    def save(self, commit=True):
        name = self.cleaned_data.get("tag_name").strip()
        if self.initial["tag_name"] != name:
            # Reuse an existing tag of the same name, or create it.
            self.instance.tag = Tag.objects.get_or_create(name=name)[0]
        return super(PersonTagForm, self).save(commit=commit)

    class Meta:
        model = PersonTag
        fields = ["tag", "tag_name", "infos"]
        widgets = {
            "tag": HiddenInput(),
        }
class PersonAttributeForm(ModelForm):
    """Free-form key/value attribute attached to a Person."""
    class Meta:
        model = PersonAttribute
        fields = ["name", "value"]


class PersonLinkForm(ModelForm):
    """External link (URI) attached to a Person."""
    class Meta:
        model = PersonLink
        fields = ["name", "uri", "lang"]


class GroupForm(ModelForm):
    """Edit a Group's basic fields."""
    class Meta:
        model = Group
        fields = ["name", "type", "infos"]
        widgets = {
            "infos": Textarea(attrs={"rows": 4}),
        }


class GroupAttributeForm(ModelForm):
    """Free-form key/value attribute attached to a Group."""
    class Meta:
        model = GroupAttribute
        fields = ["name", "value"]


class GroupLinkForm(ModelForm):
    """External link (URI) attached to a Group."""
    class Meta:
        model = GroupLink
        fields = ["name", "uri", "lang"]
class GroupMemberForm(ModelForm):
    """Group membership form with on-the-fly creation of the member Person.

    The visible selector posts into `person_id`/`person_name`; the real FK
    field `person` is hidden. Posting the sentinel "-1" for `person` means
    "create a new Person named `person_name` first".
    """
    person_id = IntegerField(label=_("Name"), widget=Select(attrs={"class": "select_person"}), required=False)
    person_name = CharField(widget=HiddenInput(), max_length=100, required=False)

    def __init__(self, *args, **kwargs):
        # FIX: was super(ModelForm, self); name the class itself.
        super(GroupMemberForm, self).__init__(*args, **kwargs)
        self.initial["person_id"] = None
        self.initial["person_name"] = ""
        try:
            self.initial["person_id"] = self.instance.person.id
            self.initial["person_name"] = self.instance.person.name
        # FIX: narrowed from a bare except. An unsaved instance without a
        # person raises RelatedObjectDoesNotExist (derives from both).
        except (AttributeError, Person.DoesNotExist):
            pass

    def _clean_fields(self, *args, **kwargs):
        value = self.data.get(self.add_prefix("person"))
        if value == "-1":
            name = self.data.get(self.add_prefix("person_name"))
            self.instance.person = Person.objects.create(name=name)
            # QueryDicts are immutable; copy before patching in the new id.
            self.data = self.data.copy()
            self.data[self.add_prefix("person")] = self.instance.person.id
            self.data[self.add_prefix("person_id")] = self.instance.person.id
        super(GroupMemberForm, self)._clean_fields(*args, **kwargs)

    def save(self, commit=True):
        if self.instance.person is None and self.cleaned_data.get("person_id"):
            self.instance.person = Person.objects.get(id=int(self.cleaned_data.get("person_id")))
        return super(GroupMemberForm, self).save(commit=commit)

    class Meta:
        model = GroupMember
        fields = ["person_id", "person_name", "person", "date_start", "date_end", "infos"]
        widgets = {
            "person": HiddenInput(),
        }
class TagForm(ModelForm):
    """Edit a Tag, including its per-category ordering fields."""
    class Meta:
        model = Tag
        fields = ["name", "parent", "persons_order", "movies_order", "series_order", "books_order", "games_order", "infos"]
        widgets = {
            "infos": Textarea(attrs={"rows": 4}),
        }


class CountryForm(ModelForm):
    """Edit a Country reference entry."""
    class Meta:
        model = Country
        fields = ["name", "code", "iso"]


class LanguageForm(ModelForm):
    """Edit a Language reference entry (ISO codes)."""
    class Meta:
        model = Language
        fields = ["name", "code", "iso1", "iso2", "iso3"]
|
teddy-michel/Mimir
|
base/forms.py
|
Python
|
gpl-3.0
| 6,913
|
from sys import stdin
from collections import deque
ALPHABET = "abcdefghijklmnopqrstuvwxyz"


def readdict():
    """Read dictionary words from stdin, one per line, until a blank line."""
    entries = set()
    while True:
        line = stdin.readline().strip()
        if not line:
            break
        entries.add(line)
    return entries


def readwords():
    """Read start/end word pairs from stdin until EOF; return a list of tuples."""
    pairs = []
    while True:
        pair = tuple(stdin.readline().split())
        if not pair:
            break
        pairs.append(pair)
    return pairs


def word_mods(w, dictionary):
    """Return list of all valid step modifications from a given word"""
    return [candidate
            for pos in range(len(w))
            for candidate in (w[:pos] + letter + w[pos + 1:]
                              for letter in ALPHABET)
            if candidate in dictionary and candidate != w]
def find_solution(start, end, dictionary):
    """Use BFS to find shortest path from start to end words"""
    if start == end:
        return [start, end]
    # Temporarily allow the target word even when not in the dictionary.
    end_added = end not in dictionary
    if end_added:
        dictionary.add(end)
    # Breadth-first search; parent links double as the visited set.
    parent = {}
    queue = deque([start])
    word = start
    while queue:
        word = queue.popleft()
        if word == end:
            break
        for neighbor in word_mods(word, dictionary):
            if neighbor not in parent:
                parent[neighbor] = word
                queue.append(neighbor)
    # Restore the dictionary to its original state.
    if end_added:
        dictionary.remove(end)
    if word != end:
        return []
    # Walk the parent links back to the start, then reverse.
    path = [word]
    while parent[word] != start:
        path.append(parent[word])
        word = parent[word]
    path.append(start)
    path.reverse()
    return path
if __name__ == '__main__':
    # Input format: dictionary words one per line terminated by a blank line,
    # then one "start end" pair per line until EOF.
    dictionary = readdict()
    words = list(reversed(readwords()))
    while words:
        start, end = words.pop()
        sequence = find_solution(start, end, dictionary)
        if not sequence:
            print('No solution.')
        else:
            # Idiom fix: plain loop instead of a side-effect-only list
            # comprehension ([print(w) for w in sequence]).
            for w in sequence:
                print(w)
        # Blank line between consecutive cases, none after the last.
        if words:
            print()
|
secnot/uva-onlinejudge-solutions
|
10150 - Doublets/main.py
|
Python
|
mit
| 2,083
|
#!/usr/bin/env python
"""Module collecting different functions to create lightness stimuli. Includes
Cornsweet edges
Todorovic's Cornsweet checkerboard
square waves
White's illusion in different forms
disc and ring stimuli
"""
import numpy as np
from stimuli.utils import degrees_to_pixels, resize_array
def cornsweet(size, ppd, contrast, ramp_width=3, exponent=2.75,
              mean_lum=.5):
    """
    Create a matrix containing a rectangular Cornsweet edge stimulus.
    The 2D luminance profile of the stimulus is defined as
    L = L_mean +/- (1 - X / w) ** a * L_mean * C/2 for the ramp and
    L = L_mean for the area beyond the ramp.
    X is the distance to the edge, w is the width of the ramp, a is a variable
    determining the steepness of the ramp, and C is the contrast at the edge,
    defined as C = (L_max-L_min) / L_mean.

    Parameters
    ----------
    size : tuple of 2 numbers
           the size of the matrix in degrees of visual angle
    ppd : number
          the number of pixels in one degree of visual angle
    contrast : number in [0,1]
               the contrast at the Cornsweet edge, defined as
               (max_luminance - min_luminance) / mean_luminance
    ramp_width : number (optional)
                 the width of the luminance ramp in degrees of visual angle.
                 Default is 3.
    exponent : number (optional)
               Determines the steepness of the ramp. Default is 2.75. An
               exponent value of 0 leads to a stimulus with uniform flanks.
    mean_lum : number
               The mean luminance of the stimulus, i.e. the value outside of
               the ramp area.

    Returns
    -------
    stim : 2D ndarray

    References
    ----------
    The formula and default values are taken from Boyaci, H., Fang, F., Murray,
    S.O., Kersten, D. (2007). Responses to Lightness Variations in Early Human
    Visual Cortex. Current Biology 17, 989-993.
    """
    # compute size as the closest even number of pixel corresponding to the
    # size given in degrees of visual angle.
    size = np.round(np.tan(np.radians(np.array(size) / 2.)) /
                    np.tan(np.radians(.5)) * ppd / 2) * 2
    # FIX: np.round yields floats; shapes and slice indices must be ints on
    # Python 3 / modern numpy. Values are whole numbers, so this is lossless.
    size = size.astype(int)
    stim = np.ones(size) * mean_lum
    dist = np.arange(size[1] // 2)
    dist = np.degrees(np.arctan(dist / 2. / ppd * 2 * np.tan(np.radians(.5))))\
        * 2
    dist /= ramp_width
    # Beyond the ramp the profile is flat (clipped at the ramp width).
    dist[dist > 1] = 1
    profile = (1 - dist) ** exponent * mean_lum * contrast / 2
    # Left half ramps up toward the edge, right half mirrors it downward.
    stim[:, :size[1] // 2] += profile[::-1]
    stim[:, size[1] // 2:] -= profile
    return stim
def todorovic(coc, vert_rep, horz_rep):
    """
    Create a checkerboard illusion by appropriately aligning COC stimuli, in
    the way demonstrated by Todorovic (1987).

    Parameters
    ----------
    coc : ndarray
          The base Craig-O'Brien-Cornsweet stimulus, created with cornsweet().
          It should have a small ramp-width compared to its size, moderate
          contrast, and be square.
    horz_rep : int
               number of horizontal repetitions of the cornsweet stimulus.
    vert_rep : int
               number of vertical repetitions.

    Returns
    -------
    stim: 2D ndarray

    References
    ----------
    Todorovic, D. (1987). The Craik-O'Brien-Cornsweet effect: new
    varieties and their theoretical implications. Perception & psychophysics,
    42(6), 545-60, Plate 4.
    """
    # FIX: tile repetition counts must be ints; '/' became true division on
    # Python 3, so use '//' (identical to the original Python 2 behavior).
    stim = np.tile(np.hstack((coc, np.fliplr(coc))), (1, horz_rep // 2))
    if horz_rep % 2 != 0:
        # Odd horizontal count: append one more (unflipped) column block.
        stim = np.hstack((stim, stim[:, 0:coc.shape[1]]))
    # Stack a half-cycle-shifted copy below to create the checker phase.
    stim = np.tile(np.vstack((stim, np.roll(stim, coc.shape[1], 1))),
                   (vert_rep // 2, 1))
    if vert_rep % 2 != 0:
        stim = np.vstack((stim, stim[0:coc.shape[0], :]))
    return stim
def square_wave(shape, ppd, contrast, frequency, mean_lum=.5, period='ignore',
                start='high'):
    """
    Create a horizontal square wave of given spatial frequency.

    Parameters
    ----------
    shape : tuple of 2 numbers
            The shape of the stimulus in degrees of visual angle. (y,x)
    ppd : number
          the number of pixels in one degree of visual angle
    contrast : number in [0,1]
               the contrast of the grating, defined as
               (max_luminance - min_luminance) / mean_luminance
    frequency : number
                the spatial frequency of the wave in cycles per degree
    mean_lum : number
               the mean luminance of the grating, i.e. (max_lum + min_lum) / 2.
               The average luminance of the actual stimulus can differ slightly
               from this value if the stimulus is not an integer of cycles big.
    period : string in ['ignore', 'full', 'half'] (optional)
             specifies if the period of the wave is taken into account when
             determining exact stimulus dimensions.
             'ignore' simply converts degrees to pixesl
             'full' rounds down to garuantee a full period
             'half' adds a half period to the size 'full' would yield.
             Default is 'ignore'.
    start : string in ['high', 'low'] (optional)
            specifies if the wave starts with a high or low value. Default is
            'high'.

    Returns
    -------
    stim : 2D ndarray
           the square wave stimulus
    """
    # FIX: the original compared strings with 'is' (identity, fragile and a
    # SyntaxWarning on modern Python); use membership / '==' instead.
    if period not in ['ignore', 'full', 'half']:
        raise TypeError('size not understood: %s' % period)
    if start not in ['high', 'low']:
        raise TypeError('start value not understood: %s' % start)
    if frequency > ppd / 2:
        raise ValueError('The frequency is limited to 1/2 cycle per pixel.')
    shape = degrees_to_pixels(np.array(shape), ppd).astype(int)
    pixels_per_cycle = int(degrees_to_pixels(1. / frequency / 2, ppd) + .5) * 2
    # FIX: '//' keeps the computed width an int on Python 3 (same result as
    # the original Python 2 integer '/').
    if period == 'full':
        shape[1] = shape[1] // pixels_per_cycle * pixels_per_cycle
    elif period == 'half':
        shape[1] = shape[1] // pixels_per_cycle * pixels_per_cycle + \
            pixels_per_cycle // 2
    # Cast the offset through mean_lum's type (keeps ints int, floats float).
    diff = type(mean_lum)(contrast * mean_lum)
    high = mean_lum + diff
    low = mean_lum - diff
    stim = np.ones(shape) * (low if start == 'high' else high)
    # Columns belonging to the first half of every cycle get the other value.
    index = [i + j for i in range(pixels_per_cycle // 2)
             for j in range(0, shape[1], pixels_per_cycle)
             if i + j < shape[1]]
    stim[:, index] = low if start == 'low' else high
    return stim
def whites_illusion_bmcc(shape, ppd, contrast, frequency, mean_lum=.5,
                         patch_height=None, start='high', sep=1):
    """
    Create a version of White's illusion on a square wave, in the style used by
    Blakeslee and McCourt (1999).

    Parameters
    ----------
    shape : tuple of 2 numbers
            The shape of the stimulus in degrees of visual angle. (y,x)
    ppd : number
          the number of pixels in one degree of visual angle
    contrast : number in [0,1]
               the contrast of the grating, defined as
               (max_luminance - min_luminance) / mean_luminance
    frequency : number
                the spatial frequency of the wave in cycles per degree
    mean_lum : number
               the mean luminance of the grating, i.e. (max_lum + min_lum) / 2.
               The average luminance of the actual stimulus can differ slightly
               from this value if the stimulus is not an integer of cycles big.
    patch_height : number
                   the height of the gray patches, in degrees of visual ange
    start : string in ['high', 'low'] (optional)
            specifies if the wave starts with a high or low value. Default is
            'high'.
    sep : int (optional)
          the separation distance between the two test patches, measured in
          full grating cycles. Default is 1.

    Returns
    -------
    stim : 2D ndarray
           the stimulus

    References
    ----------
    Blakeslee B, McCourt ME (1999). A multiscale spatial filtering account of
    the White effect, simultaneous brightness contrast and grating induction.
    Vision research 39(26):4361-77.
    """
    stim = square_wave(shape, ppd, contrast, frequency, mean_lum, 'full',
                       start)
    half_cycle = int(degrees_to_pixels(1. / frequency / 2, ppd) + .5)
    if patch_height is None:
        patch_height = stim.shape[0] // 3
    else:
        patch_height = degrees_to_pixels(patch_height, ppd)
    y_pos = (stim.shape[0] - patch_height) // 2
    # NOTE(review): if patch_height equals the stimulus height, y_pos is 0 and
    # the [y_pos:-y_pos] slices select nothing — confirm intended behavior.
    # FIX: '//' keeps slice bounds ints on Python 3 ('/' gave float indices).
    center = stim.shape[1] // 2
    stim[y_pos: -y_pos,
         center - (sep + 1) * half_cycle:
         center - sep * half_cycle] = mean_lum
    stim[y_pos: -y_pos,
         center + sep * half_cycle:
         center + (sep + 1) * half_cycle] = mean_lum
    return stim
def contours_white_bmmc(shape, ppd, contrast, frequency, mean_lum=.5,
        patch_height=None, sep=1, orientation='vertical', contour_width=6):
    """
    Create stimuli with contours masking either the vertical or the horizontal
    borders of the test patches in White's illusion (Blakeslee, McCourt
    version).

    Parameters
    ----------
    shape : tuple of 2 numbers
            The shape of the stimulus in degrees of visual angle. (y,x)
    ppd : number
          the number of pixels in one degree of visual angle
    contrast : number in [0,1]
               the contrast of dark vs bright contours, defined as
               (max_luminance - min_luminance) / (2 * mean_luminance)
    frequency : number
                the spatial frequency of the White's stimulus to be masked in
                cycles per degree
    mean_lum : number
               the background luminance of the masking stimuli.
    patch_height : number
                   the height of the gray patches to be masked, in degrees of
                   visual ange
    sep : int (optional)
          the separation distance between the two test patches, measured in
          full grating cycles. Default is 1.
    orientation : ['vertical', 'horizontal'] (optional)
                  the orientation of the border to be masked. Default is
                  'vertical'.
    contour_width : number
                    the width of the masking contour in pixels

    Returns
    -------
    masks : tuple of two 2D ndarrays
            the contour adaptation masks. masks[0] has dark contours, mask[1]
            has bright contours.
    """
    # Geometry below mirrors whites_illusion_bmcc so the masks line up with
    # the patches that function places.
    shape = degrees_to_pixels(np.array(shape), ppd).astype(int)
    pixels_per_cycle = int(degrees_to_pixels(1. / frequency / 2, ppd) + .5) * 2
    shape[1] = shape[1] // pixels_per_cycle * pixels_per_cycle
    # determine pixel width of individual grating bars (half cycle)
    hc = pixels_per_cycle // 2
    if patch_height is None:
        patch_height = shape[0] // 3
    else:
        patch_height = degrees_to_pixels(patch_height, ppd)
    y_pos = (shape[0] - patch_height) // 2
    # Left edges of the two test patches.
    x_pos = (shape[1] // 2 - (sep + 1) * hc,
             shape[1] // 2 + sep * hc)
    mask_dark = np.ones(shape) * mean_lum
    mask_bright = np.ones(shape) * mean_lum
    # Boolean index of contour pixels; filled per orientation below.
    idx_mask = np.zeros(shape, dtype=bool)
    bright = mean_lum * (1 + contrast)
    dark = mean_lum * (1 - contrast)
    # Contours are centered on the patch borders.
    offset = contour_width // 2
    if orientation == 'vertical':
        # Two vertical strips per patch: its left and right borders.
        idx_mask[y_pos: -y_pos,
                 x_pos[0] - offset : x_pos[0] + offset] = True
        idx_mask[y_pos: -y_pos,
                 x_pos[0] + hc - offset : x_pos[0] + hc + offset] = True
        idx_mask[y_pos: -y_pos,
                 x_pos[1] - offset : x_pos[1] + offset] = True
        idx_mask[y_pos: -y_pos,
                 x_pos[1] + hc - offset : x_pos[1] + hc + offset] = True
    elif orientation == 'horizontal':
        # Two horizontal strips per patch: its top and bottom borders.
        idx_mask[y_pos - offset : y_pos + offset,
                 x_pos[0] : x_pos[0] + hc] = True
        idx_mask[y_pos - offset : y_pos + offset,
                 x_pos[1] : x_pos[1] + hc] = True
        idx_mask[-y_pos - offset : -y_pos + offset,
                 x_pos[0] : x_pos[0] + hc] = True
        idx_mask[-y_pos - offset : -y_pos + offset,
                 x_pos[1] : x_pos[1] + hc] = True
    mask_dark[idx_mask] = dark
    mask_bright[idx_mask] = bright
    return (mask_dark, mask_bright)
def whites_illusion_gil(shape, ppd, contrast, frequency, mean_lum=.5,
                        start='low'):
    """
    Create a version of White's illusion on a square wave, in the style used by
    Gilchrist (2006, p. 281)

    Parameters
    ----------
    shape : tuple of 2 numbers
            The shape of the stimulus in degrees of visual angle. (y,x)
    ppd : number
          the number of pixels in one degree of visual angle
    contrast : number in [0,1]
               the contrast of the grating, defined as
               (max_luminance - min_luminance) / mean_luminance
    frequency : number
                the spatial frequency of the wave in cycles per degree
    mean_lum : number
               the mean luminance of the grating, i.e. (max_lum + min_lum) / 2.
               The average luminance of the actual stimulus can differ slightly
               from this value if the stimulus is not an integer of cycles big.
    start : string in ['high', 'low'] (optional)
            specifies if the wave starts with a high or low value. Default is
            'high'.

    Returns
    -------
    stim : 2D ndarray
           the stimulus

    References
    ----------
    Gilchrist A (2006). Seeing Black and White. New York, New York, USA: Oxford
    University Press.
    """
    stim = square_wave(shape, ppd, contrast, frequency, mean_lum, 'half',
                       start)
    half_cycle = int(degrees_to_pixels(1. / frequency / 2, ppd) + .5)
    # Columns of dark/bright bars eligible to receive a gray test patch.
    on_dark_idx = [i for i in range(int(half_cycle * 2.5),
                                    int(stim.shape[1] - half_cycle * .5))
                   if stim[0, i] < mean_lum]
    on_light_idx = [i for i in range(int(half_cycle * 1.5),
                                     int(stim.shape[1] - half_cycle * 1.5))
                    if stim[0, i] > mean_lum]
    # FIX: '//' keeps slice bounds and randint's bound ints on Python 3.
    fifth = stim.shape[0] // 5
    stim[fifth: fifth * 2, on_light_idx] = mean_lum
    stim[fifth * 3: fifth * 4, on_dark_idx] = mean_lum
    # randomize border cutoff
    max_cut = stim.shape[0] // 10
    bg = stim[0, half_cycle]
    # FIX: 'is' string comparison replaced with '=='.
    for start_idx in range(0 if start == 'low' else half_cycle,
                           stim.shape[1] - half_cycle, 2 * half_cycle):
        stim[0: np.random.randint(max_cut),
             start_idx: start_idx + half_cycle] = bg
        stim[stim.shape[0] - np.random.randint(max_cut):,
             start_idx: start_idx + half_cycle] = bg
    return stim
def disc_and_ring(shape, radii, values, bg=0, ppd=30, ssf=5):
    """
    Create a disc and ring stimulus with an arbitrary number of rings.

    Parameters
    ----------
    shape : tuple of 2 numbers
        The shape of the stimulus in degrees of visual angle. (y,x)
    radii : tuple of numbers
        the radii of the circles in degrees of visual angle, starting from
        the largest.
    values : tuple of numbers
        the gray values to assign to the circles, starting at the
        outermost. Must be the same length as radii.
    bg : number (optional)
        the background value of the stimulus. Default is 0.
    ppd : number (optional)
        the number of pixels in one degree of visual angle. Default is 30.
    ssf : int (optional)
        the supersampling-factor used for anti-aliasing. Default is 5.

    Returns
    -------
    stim : 2D ndarray
        the stimulus
    """
    assert len(radii) == len(values)
    # create the stimulus at ssf times its final size so that downsampling
    # at the end acts as anti-aliasing
    stim = np.ones(degrees_to_pixels(np.array(shape), ppd).astype(int) * ssf) * bg
    # compute distance from center of array for every point
    x = np.linspace(-stim.shape[1] / 2., stim.shape[1] / 2., stim.shape[1])
    y = np.linspace(-stim.shape[0] / 2., stim.shape[0] / 2., stim.shape[0])
    dist = np.sqrt(x[np.newaxis, :] ** 2 + y[:, np.newaxis] ** 2)
    # paint the circles from the outside in so smaller discs overwrite
    # the larger ones they sit on
    radii = degrees_to_pixels(np.array(radii), ppd) * ssf
    for radius, value in zip(radii, values):
        stim[dist < radius] = value
    # downsample the stimulus by local averaging along rows and columns;
    # floor division keeps the size an int (plain '/' would hand a float
    # to np.eye and raise on Python 3)
    sampler = resize_array(np.eye(stim.shape[0] // ssf), (1, ssf))
    # NOTE(review): the sampler is built from shape[0] only, so the matrix
    # products below assume a square stimulus -- confirm for y != x shapes
    return np.dot(sampler, np.dot(stim, sampler.T)) / ssf ** 2
|
TUBvision/stimuli
|
lightness/lightness.py
|
Python
|
gpl-2.0
| 16,367
|
"""Tests for control_flow_ops.py."""
import tensorflow.python.platform
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import standard_ops as tf
from tensorflow.python.platform import googletest
class GroupTestCase(TensorFlowTestCase):
    """Checks the NoOp control-dependency graphs produced by tf.group."""

    def _StripNode(self, nd):
        """Reduce a NodeDef to its name, op, inputs, and (optional) device."""
        stripped = graph_pb2.NodeDef(name=nd.name, op=nd.op, input=nd.input)
        if nd.device:
            stripped.device = nd.device
        return stripped

    def _StripGraph(self, gd):
        """Copy gd keeping only, node.name, node.op, node.input, and node.device."""
        stripped_nodes = [self._StripNode(node) for node in gd.node]
        return graph_pb2.GraphDef(node=stripped_nodes)

    def testGroup_NoDevices(self):
        with ops.Graph().as_default() as graph:
            const_a = tf.constant(0, name="a")
            const_b = tf.constant(0, name="b")
            const_c = tf.constant(0, name="c")
            tf.group(const_a.op, const_b.op, const_c.op, name="root")
        graph_def = graph.as_graph_def()
        self.assertProtoEquals("""
      node { name: "a" op: "Const"}
      node { name: "b" op: "Const"}
      node { name: "c" op: "Const"}
      node { name: "root" op: "NoOp" input: "^a" input: "^b" input: "^c" }
    """, self._StripGraph(graph_def))

    def testGroup_OneDevice(self):
        with ops.Graph().as_default() as graph:
            with graph.device("/task:0"):
                const_a = tf.constant(0, name="a")
                const_b = tf.constant(0, name="b")
                tf.group(const_a.op, const_b.op, name="root")
        graph_def = graph.as_graph_def()
        self.assertProtoEquals("""
      node { name: "a" op: "Const" device: "/task:0" }
      node { name: "b" op: "Const" device: "/task:0" }
      node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" }
    """, self._StripGraph(graph_def))

    def testGroup_MultiDevice(self):
        with ops.Graph().as_default() as graph:
            with graph.device("/task:0"):
                const_a = tf.constant(0, name="a")
                const_b = tf.constant(0, name="b")
            with graph.device("/task:1"):
                const_c = tf.constant(0, name="c")
                const_d = tf.constant(0, name="d")
            with graph.device("/task:2"):
                # one NoOp per input device, tied together by a root NoOp
                tf.group(const_a.op, const_b.op, const_c.op, const_d.op,
                         name="root")
        graph_def = graph.as_graph_def()
        self.assertProtoEquals("""
      node { name: "a" op: "Const" device: "/task:0"}
      node { name: "b" op: "Const" device: "/task:0"}
      node { name: "c" op: "Const" device: "/task:1"}
      node { name: "d" op: "Const" device: "/task:1"}
      node { name: "root/NoOp" op: "NoOp" input: "^a" input: "^b"
             device: "/task:0" }
      node { name: "root/NoOp_1" op: "NoOp" input: "^c" input: "^d"
             device: "/task:1" }
      node { name: "root" op: "NoOp" input: "^root/NoOp" input: "^root/NoOp_1"
             device: "/task:2" }
    """, self._StripGraph(graph_def))
class ShapeTestCase(TensorFlowTestCase):
    """Checks that with_dependencies preserves the tensor's static shape."""

    def testShape(self):
        with ops.Graph().as_default():
            two_floats = tf.constant([1.0, 2.0])
            self.assertEquals([2], two_floats.get_shape())
            guarded = control_flow_ops.with_dependencies(
                [tf.constant(1.0)], two_floats)
            self.assertEquals([2], guarded.get_shape())
if __name__ == "__main__":
    # run every TensorFlowTestCase suite defined in this module
    googletest.main()
|
liyu1990/tensorflow
|
tensorflow/python/ops/control_flow_ops_test.py
|
Python
|
apache-2.0
| 3,164
|
# Copyright (c) 2015, Julian Straub <jstraub@csail.mit.edu> Licensed
# under the MIT license. See the license file LICENSE.
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
import numpy as np
#import cv2
import scipy.io
import subprocess as subp
import os, re, time
import argparse
#from vpCluster.rgbd.rgbdframe import RgbdFrame
#from vpCluster.manifold.sphere import Sphere
#from js.utils.config import Config2String
#from js.utils.plot.pyplot import SaveFigureAsImage
def run(cfg,reRun):
    """Invoke the realtimeDDPvMF_file binary on one depth frame from cfg.

    NOTE(review): this file is Python 2 (print statements). The binary path
    and its flags are hard-coded; -l/-s appear to be lambda and survival
    parameters of the clustering -- confirm against the C++ tool's help.
    """
    #args = ['../build/dpSubclusterSphereGMM',
    # args = ['../build/dpStickGMM',
    args = ['../build/bin/realtimeDDPvMF_file',
            '-i {}'.format(cfg['rootPath']+cfg['dataPath']+"_d.png"),
            '-o {}'.format(cfg['outName']),
            '-l {}'.format(100), # lambda
            '-s {}'.format(1), # survival
            ]
    if reRun:
        # show the command and give the user a moment to abort
        print ' '.join(args)
        print ' --------------------- '
        time.sleep(1)
        err = subp.call(' '.join(args),shell=True)
        if err:
            print 'error when executing'
    # raw_input()
    # z = np.loadtxt(cfg['outName']+'.lbl',dtype=int,delimiter=' ')
    # sil = np.loadtxt(cfg['outName']+'.lbl_measures.csv',delimiter=" ")
def config2Str(cfg):
    """Serialize selected cfg entries into a short '-' separated tag.

    'mode' is mandatory; the other keys are appended only when present.
    """
    keys = ['mode', 'dt', 'tMax', 'nCGIter']
    parts = ['{}_{}'.format(keys[0], cfg[keys[0]])]
    for key in keys[1:]:
        if key in cfg:
            parts.append('{}_{}'.format(key, cfg[key]))
    return '-'.join(parts)
# ------------------------------------------------------------- CLI options
parser = argparse.ArgumentParser(description = 'rtmf extraction for NYU')
parser.add_argument('-s','--start', type=int, default=0,
                    help='start image Nr')
parser.add_argument('-e','--end', type=int, default=1449,
                    help='end image Nr')
parser.add_argument('-m','--mode', default='DP-vMF-means',
                    help='spkm, DP-vMF-means')
parser.add_argument('-nyu', action='store_true',
                    help='switch to process the NYU dataset')
args = parser.parse_args()
# --------------------------------------------------------------- base config
cfg=dict()
cfg['rootPath'] = '/data/vision/fisher/data1/nyu_depth_v2/extracted/'
cfg['outputPath'] = '/data/vision/scratch/fisher/jstraub/dpMMlowVar/nyu2'
cfg['mode'] = args.mode;
mode = ['multiFromFile']
reRun = True           # run the binary even if results already exist
printCmd = True
onlyPaperEval = True   # restrict to the frames used in the paper
# frames evaluated in the paper
paperEval = [
    'bathroom_0028_691',
    'home_office_0012_395',
    'kitchen_0037_831',
    'bedroom_0085_1084',
    'living_room_0064_1314',
    'bathroom_0015_664',
    'office_kitchen_0001_409',
    'bedroom_0026_914',
    'bedroom_0032_935',
    'bedroom_0043_959',
    'conference_room_0002_342',
    'dining_room_0030_1422',
    'kitchen_0004_1',
    'kitchen_0004_2',
    'kitchen_0007_131',
    'kitchen_0011_143',
    'kitchen_0024_774',
    'kitchen_0024_776',
    'kitchen_0045_865',
    'kitchen_0046_870',
    'kitchen_0057_567',
    'office_0008_15',
    'office_0008_17',
    'office_0009_19',
    'office_0022_618',
    'office_0022_619',
    'office_0027_635',
    'office_0027_638',
    'office_kitchen_0001_409']
if not args.nyu:
    print "only NYU supported now"
    exit(1)
cfg['evalStart'] = args.start
cfg['evalEnd'] = args.end
indexPath = '/data/vision/fisher/data1/nyu_depth_v2/index.txt'
# NOTE(review): rootPath/outputPath are re-assigned here; outputPath now has
# the trailing slash that 'outName' below depends on
cfg['rootPath'] = '/data/vision/fisher/data1/nyu_depth_v2/extracted/'
cfg['outputPath'] = '/data/vision/scratch/fisher/jstraub/dpMMlowVar/nyu2/'
# select frame names with index in [evalStart, evalEnd) from the dataset index
names =[]
with open(indexPath) as f:
    allNames = f.read().splitlines() #readlines()
for i in range(len(allNames)):
    if cfg['evalStart'] <= i and i <cfg['evalEnd']:
        names.append(allNames[i])
        print '@{}: {}'.format(len(names)-1,names[-1])
print names
rndInds = range(len(names)) # np.random.permutation(len(names))
# process the selected frames one by one
for ind in rndInds:
    if onlyPaperEval and names[ind] not in paperEval:
        continue
    cfg['dataPath'] = names[ind]
    cfg['outName'] = cfg['outputPath']+cfg['dataPath']+'_'+config2Str(cfg)
    print 'processing '+cfg['rootPath']+cfg['dataPath']
    run(cfg,reRun)
|
jstraub/rtDDPvMF
|
python/evalNYU.py
|
Python
|
mit
| 3,598
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.message import defined_messages
from cinder import objects
from cinder.objects import fields
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""

    # Subclasses can point these at another manager/driver pair to re-use
    # the whole suite unchanged.
    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'cinder.scheduler.driver.Scheduler'

    class AnException(Exception):
        pass

    def setUp(self):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        # disable the capability-collection startup delay; individual tests
        # re-enable it when they exercise the delay path
        self.manager._startup_delay = False
        self.context = context.get_admin_context()
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}

    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)

    @mock.patch('eventlet.sleep')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
    def test_init_host_with_rpc(self, publish_capabilities_mock, sleep_mock):
        # init_host_with_rpc must request capabilities, wait one periodic
        # interval, and then clear the startup delay flag.
        self.manager._startup_delay = True
        self.manager.init_host_with_rpc()
        publish_capabilities_mock.assert_called_once_with(mock.ANY)
        sleep_mock.assert_called_once_with(CONF.periodic_interval)
        self.assertFalse(self.manager._startup_delay)

    @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
    @mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
    @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
    @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.4'})
    def test_reset(self, get_min_obj, get_min_rpc):
        # reset() must rebuild the volume rpcapi with freshly computed
        # minimum RPC/object versions instead of the pinned ones above.
        mgr = self.manager_cls()
        volume_rpcapi = mgr.driver.volume_rpcapi
        self.assertEqual('1.3', volume_rpcapi.client.version_cap)
        self.assertEqual('1.4',
                         volume_rpcapi.client.serializer._base.version_cap)
        get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
        mgr.reset()
        volume_rpcapi = mgr.driver.volume_rpcapi
        self.assertEqual(get_min_rpc.return_value,
                         volume_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         volume_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
        # Test no capabilities passes empty dictionary
        service = 'fake_service'
        host = 'fake_host'
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host)
        _mock_update_cap.assert_called_once_with(service, host, {})

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_correct(self, _mock_update_cap):
        # Test capabilities passes correctly
        service = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host,
                                                 capabilities=capabilities)
        _mock_update_cap.assert_called_once_with(service, host, capabilities)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.message.api.API.create')
    @mock.patch('cinder.db.volume_update')
    def test_create_volume_exception_puts_volume_in_error_state(
            self, _mock_volume_update, _mock_message_create,
            _mock_sched_create):
        # Test NoValidHost exception behavior for create_volume.
        # Puts the volume in 'error' state and eats the exception.
        _mock_sched_create.side_effect = exception.NoValidHost(reason="")
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id,
                        'volume': {'id': volume.id, '_name_id': None,
                                   'metadata': {}, 'admin_metadata': {},
                                   'glance_metadata': {}}}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    volume.id,
                                                    {'status': 'error'})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        # a user-facing message must also be recorded for the failure
        _mock_message_create.assert_called_once_with(
            self.context, defined_messages.UNABLE_TO_ALLOCATE,
            self.context.project_id, resource_type='VOLUME',
            resource_uuid=volume.id)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('eventlet.sleep')
    def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
        # with the startup delay disabled, scheduling happens immediately
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        self.assertFalse(_mock_sleep.called)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep,
                                                         _mock_is_ready,
                                                         _mock_sched_create):
        # during startup the manager polls is_ready(), sleeping 1s between
        # tries; two False answers -> exactly two sleeps before scheduling
        self.manager._startup_delay = True
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        _mock_is_ready.side_effect = [False, False, True]
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        calls = [mock.call(1)] * 2
        _mock_sleep.assert_has_calls(calls)
        self.assertEqual(2, _mock_sleep.call_count)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep,
                                                    _mock_is_ready,
                                                    _mock_sched_create):
        # if the driver is ready on the first poll, no sleep happens at all
        self.manager._startup_delay = True
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        _mock_is_ready.return_value = True
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        self.assertFalse(_mock_sleep.called)

    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_host_passes,
            _mock_volume_get):
        # Test NoValidHost exception behavior for migrate_volume_to_host.
        # Puts the volume in 'error_migrating' state and eats the exception.
        fake_updates = {'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_host_passes, _mock_volume_get,
            'available', fake_updates)

    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state_maintenance(
            self, _mock_volume_update, _mock_host_passes,
            _mock_volume_get):
        # a volume in maintenance additionally gets its status restored
        fake_updates = {'status': 'available',
                        'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_host_passes, _mock_volume_get,
            'maintenance', fake_updates)

    def _test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_host_passes,
            _mock_volume_get, status, fake_updates):
        # shared body for the two migrate tests above
        volume = tests_utils.create_volume(self.context,
                                           status=status,
                                           previous_status='available')
        fake_volume_id = volume.id
        request_spec = {'volume_id': fake_volume_id}
        _mock_host_passes.side_effect = exception.NoValidHost(reason="")
        _mock_volume_get.return_value = volume
        self.manager.migrate_volume_to_host(self.context, volume, 'host', True,
                                            request_spec=request_spec,
                                            filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    fake_volume_id,
                                                    fake_updates)
        _mock_host_passes.assert_called_once_with(self.context, 'host',
                                                  request_spec, {})

    @mock.patch('cinder.db.volume_update')
    @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
    @mock.patch('cinder.quota.QUOTAS.rollback')
    def test_retype_volume_exception_returns_volume_state(
            self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
        # Test NoValidHost exception behavior for retype.
        # Puts the volume in original state and eats the exception.
        volume = tests_utils.create_volume(self.context,
                                           status='retyping',
                                           previous_status='in-use')
        instance_uuid = '12345678-1234-5678-1234-567812345678'
        volume_attach = tests_utils.attach_volume(self.context, volume.id,
                                                  instance_uuid, None,
                                                  '/dev/fake')
        _mock_vol_attachment_get.return_value = [volume_attach]
        reservations = mock.sentinel.reservations
        request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3},
                        'migration_policy': 'on-demand',
                        'quota_reservations': reservations}
        _mock_vol_update.return_value = {'status': 'in-use'}
        _mock_find_retype_host = mock.Mock(
            side_effect=exception.NoValidHost(reason=""))
        orig_retype = self.manager.driver.find_retype_host
        self.manager.driver.find_retype_host = _mock_find_retype_host
        self.manager.retype(self.context, volume, request_spec=request_spec,
                            filter_properties={})
        _mock_find_retype_host.assert_called_once_with(self.context,
                                                       request_spec, {},
                                                       'on-demand')
        # the quota reservation must be given back on failure
        quota_rollback.assert_called_once_with(self.context, reservations)
        _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                                 {'status': 'in-use'})
        # restore the real driver method for subsequent tests
        self.manager.driver.find_retype_host = orig_retype

    def test_create_consistencygroup_exceptions(self):
        # Both CinderException (re-raised) and NoValidHost (swallowed) must
        # leave the group in ERROR state and log the problem.
        with mock.patch.object(filter_scheduler.FilterScheduler,
                               'schedule_create_consistencygroup') as mock_cg:
            original_driver = self.manager.driver
            consistencygroup_obj = \
                fake_consistencygroup.fake_consistencyobject_obj(self.context)
            # NOTE(review): assigns the class, not an instance; works here
            # because the scheduling method is patched on the class above
            self.manager.driver = filter_scheduler.FilterScheduler
            LOG = self.mock_object(manager, 'LOG')
            self.mock_object(db, 'consistencygroup_update')
            ex = exception.CinderException('test')
            mock_cg.side_effect = ex
            group_id = fake.CONSISTENCY_GROUP_ID
            self.assertRaises(exception.CinderException,
                              self.manager.create_consistencygroup,
                              self.context,
                              consistencygroup_obj)
            self.assertGreater(LOG.exception.call_count, 0)
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': (
                    fields.ConsistencyGroupStatus.ERROR)})
            mock_cg.reset_mock()
            LOG.exception.reset_mock()
            db.consistencygroup_update.reset_mock()
            mock_cg.side_effect = exception.NoValidHost(
                reason="No weighed hosts available")
            self.manager.create_consistencygroup(
                self.context, consistencygroup_obj)
            self.assertGreater(LOG.error.call_count, 0)
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': (
                    fields.ConsistencyGroupStatus.ERROR)})
            self.manager.driver = original_driver
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class."""

    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler

    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
        self.topic = 'fake_topic'

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities(self, _mock_update_cap):
        # the driver must forward capability updates unchanged
        service_name = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.driver.update_service_capabilities(service_name, host,
                                                capabilities)
        _mock_update_cap.assert_called_once_with(service_name, host,
                                                 capabilities)

    @mock.patch('cinder.scheduler.host_manager.HostManager.'
                'has_all_capabilities', return_value=False)
    def test_is_ready(self, _mock_has_caps):
        # is_ready() mirrors the host manager's capability completeness
        ready = self.driver.is_ready()
        _mock_has_caps.assert_called_once_with()
        self.assertFalse(ready)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test schedule driver class.

    Test cases for base scheduler driver class methods
    that will fail if the driver is changed.
    """

    def test_unimplemented_schedule(self):
        # the abstract base must refuse to schedule anything
        positional = (1, 2, 3)
        keyword = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                          self.context, self.topic, 'schedule_something',
                          *positional, **keyword)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods."""

    def setUp(self):
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)

    @mock.patch('cinder.db.volume_update')
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
        # volume_update_db must stamp the chosen host and a naive (tz-free)
        # scheduled_at timestamp onto the volume row
        volume = fake_volume.fake_volume_obj(self.context)
        _mock_volume_get.return_value = volume
        driver.volume_update_db(self.context, volume.id, 'fake_host')
        scheduled_at = volume.scheduled_at.replace(tzinfo=None)
        _mock_vol_update.assert_called_once_with(
            self.context, volume.id, {'host': 'fake_host',
                                      'scheduled_at': scheduled_at})
|
Hybrid-Cloud/cinder
|
cinder/tests/unit/scheduler/test_scheduler.py
|
Python
|
apache-2.0
| 18,413
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
dashboard.py [options]
dashboard.py (-h | --help)
dashboard.py --version
Options:
--slurmjob-template <path> Slurm job dashboard template.
[default: /opt/qnib/grafana/templates/slurm_job.j2]
--slurm-template <path> Slurm Dashbard.
[default: /opt/qnib/grafana/templates/slurm.j2]
--delay <int> Seconds delay inbetween loop runs [default: 10]
--server If set loops over fetching information.
-h --help Show this screen.
--version Show version.
--loglevel, -L=<str> Loglevel [default: INFO]
(ERROR, CRITICAL, WARN, INFO, DEBUG)
--log2stdout, -l Log to stdout, otherwise to logfile. [default: False]
--logfile, -f=<path> Logfile to log to (default: <scriptname>.log)
--cfg, -c=<path> Configuration file.
"""
# load librarys
import logging
import os
import re
import codecs
import math
import ast
import sys
from datetime import datetime
from ConfigParser import RawConfigParser, NoOptionError
import time
import consul
from jinja2 import Template
try:
from docopt import docopt
except ImportError:
HAVE_DOCOPT = False
else:
HAVE_DOCOPT = True
__author__ = 'Christian Kniep <christian()qnib.org>'
__copyright__ = 'Copyright 2015 QNIB Solutions'
__license__ = """GPL v2 License (http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)"""
class QnibConfig(RawConfigParser):
""" Class to abstract config and options
"""
specials = {
'TRUE': True,
'FALSE': False,
'NONE': None,
}
def __init__(self, opt):
""" init """
RawConfigParser.__init__(self)
if opt is None:
self._opt = {
"--log2stdout": False,
"--logfile": None,
"--loglevel": "ERROR",
}
else:
self._opt = opt
self.logformat = '%(asctime)-15s %(levelname)-5s [%(module)s] %(message)s'
self.loglevel = opt.get('--loglevel')
self.log2stdout = opt['--log2stdout']
if self.loglevel is None and opt.get('--cfg') is None:
print "please specify loglevel (-L)"
sys.exit(0)
self.eval_cfg()
self.eval_opt()
self.set_logging()
logging.info("SetUp of QnibConfig is done...")
def do_get(self, section, key, default=None):
""" Also lent from: https://github.com/jpmens/mqttwarn
"""
try:
val = self.get(section, key)
if val.upper() in self.specials:
return self.specials[val.upper()]
return ast.literal_eval(val)
except NoOptionError:
return default
except ValueError: # e.g. %(xxx)s in string
return val
except:
raise
return val
def config(self, section):
''' Convert a whole section's options (except the options specified
explicitly below) into a dict, turning
[config:mqtt]
host = 'localhost'
username = None
list = [1, 'aaa', 'bbb', 4]
into
{u'username': None, u'host': 'localhost', u'list': [1, 'aaa', 'bbb', 4]}
Cannot use config.items() because I want each value to be
retrieved with g() as above
SOURCE: https://github.com/jpmens/mqttwarn
'''
d = None
if self.has_section(section):
d = dict((key, self.do_get(section, key))
for (key) in self.options(section) if key not in ['targets'])
return d
def eval_cfg(self):
""" eval configuration which overrules the defaults
"""
cfg_file = self._opt.get('--cfg')
if cfg_file is not None:
fd = codecs.open(cfg_file, 'r', encoding='utf-8')
self.readfp(fd)
fd.close()
self.__dict__.update(self.config('defaults'))
def eval_opt(self):
""" Updates cfg according to options """
def handle_logfile(val):
""" transforms logfile argument
"""
if val is None:
logf = os.path.splitext(os.path.basename(__file__))[0]
self.logfile = "%s.log" % logf.lower()
else:
self.logfile = val
self._mapping = {
'--logfile': lambda val: handle_logfile(val),
}
for key, val in self._opt.items():
if key in self._mapping:
if isinstance(self._mapping[key], str):
self.__dict__[self._mapping[key]] = val
else:
self._mapping[key](val)
break
else:
if val is None:
continue
mat = re.match("\-\-(.*)", key)
if mat:
self.__dict__[mat.group(1)] = val
else:
logging.info("Could not find opt<>cfg mapping for '%s'" % key)
def set_logging(self):
""" sets the logging """
self._logger = logging.getLogger()
self._logger.setLevel(logging.DEBUG)
if self.log2stdout:
hdl = logging.StreamHandler()
hdl.setLevel(self.loglevel)
formatter = logging.Formatter(self.logformat)
hdl.setFormatter(formatter)
self._logger.addHandler(hdl)
else:
hdl = logging.FileHandler(self.logfile)
hdl.setLevel(self.loglevel)
formatter = logging.Formatter(self.logformat)
hdl.setFormatter(formatter)
self._logger.addHandler(hdl)
def __str__(self):
""" print human readble """
ret = []
for key, val in self.__dict__.items():
if not re.match("_.*", key):
ret.append("%-15s: %s" % (key, val))
return "\n".join(ret)
def __getitem__(self, item):
""" return item from opt or __dict__
:param item: key to lookup
:return: value of key
"""
if item in self.__dict__.keys():
return self.__dict__[item]
else:
return self._opt[item]
class SlurmDash(object):
    """Renders Grafana dashboards from slurm job info stored in Consul.

    One dashboard per job plus an overview board; in --server mode it
    blocks on Consul watches and re-renders on every change.
    """

    def __init__(self, cfg):
        """Load the jinja2 templates and connect to Consul."""
        self._cfg = cfg
        self._consul = consul.Consul(host='consul.service.consul')
        self._template = {}
        self._last_idx = None           # Consul modify-index for blocking queries
        self._last_run = time.time()    # used to throttle loop iterations
        # output locations of the rendered dashboards
        self._target = {
            'slurmjob': '/var/www/grafana/app/dashboards/slurm_%(jobid)s.json',
            'slurm': '/var/www/grafana/app/dashboards/slurm.json',
        }
        with open(cfg['--slurmjob-template'], "r") as fd:
            self._template['slurmjob'] = Template(fd.read())
        with open(cfg['--slurm-template'], "r") as fd:
            self._template['slurm'] = Template(fd.read())

    def run(self):
        """Run once, or loop forever when --server is set."""
        if self._cfg['--server']:
            self.loop()
        else:
            self.fetch_info()

    def loop(self):
        """Loop over changes of the slurm/job/ key in Consul."""
        while True:
            logging.debug("wait for index '%s'" % self._last_idx)
            try:
                # blocking query: returns when the key changes or after 30s
                self._last_idx, val = self._consul.kv.get('slurm/job/', wait='30s', index=self._last_idx)
            except KeyboardInterrupt:
                logging.info("Gracefully exit after CTRL-C")
                self.close()
                break
            self.fetch_info()
            now = time.time()
            since = now - self._last_run
            # throttle: never run more often than once per --delay seconds
            delay = max(0, int(self._cfg['--delay']) - since)
            self._cfg._logger.info("Took: %ssec, sleep for %ssec" % (since, delay))
            self._last_run = now
            time.sleep(delay)
            logging.debug("Got index '%s'" % self._last_idx)

    def close(self):
        """Hook for cleanup on exit (currently a no-op)."""
        pass

    def fetch_info(self):
        """Fetch job info from the K/V store and render the dashboards."""
        idx, values = self._consul.kv.get('slurm/job/', recurse=True)
        jobs = {}
        if values is None:
            return
        dt_end = None
        dt_start = None
        dt_diff = {}  # NOTE(review): unused leftover (see diff_time below)

        def diff_time(start, end):
            # duration in whole minutes
            dt_diff = {}
            return math.floor(((end-start).seconds) / 60)
            #dt_diff['day'] = divmod(dt_end-dt_start,86400) # days
            #dt_diff['hour'] = divmod(dt_diff['day'][1],3600) # hours
            #dt_diff['min'] = divmod(dt_diff['hour'][1],60) # minutes
            #dt_diff['sec'] = divmod(dt_diff['min'][1],60) # minutes
            #return dt_diff

        # keys look like slurm/job/<jobid>/<field>
        for value in values:
            jobid, key = value['Key'].split("/")[2:]
            jobid = int(jobid)
            val = value['Value']
            key = key.lower()
            if jobid not in jobs.keys():
                jobs[jobid] = {}
            if key == 'nodelist':
                jobs[jobid][key] = val.split(",")
            elif key == "start":
                dt_start = datetime.fromtimestamp(int(val))
                jobs[jobid]['start_grafana'] = dt_start.strftime("%FT%H:%M:%S.%fZ")
                jobs[jobid]['start_human'] = dt_start.strftime("%F %H:%M:%S")
                if dt_end:
                    jobs[jobid]['duration'] = diff_time(dt_start, dt_end)
            elif key == 'end':
                dt_end = datetime.fromtimestamp(int(val))
                if dt_start:
                    jobs[jobid]['duration'] = diff_time(dt_start, dt_end)
                jobs[jobid]['end_human'] = dt_end.strftime("%H:%M:%S")
            else:
                jobs[jobid][key] = val
        # Overview board
        running_jobs = []
        finished_jobs = []
        for jobid, job in jobs.items():
            job['jobid'] = jobid
            if 'end_human' in job.keys():
                # finished row: (id, name, user, start, duration, exit code);
                # fall back to '?' for fields missing in the K/V store
                try:
                    payload = (jobid, job['jobname'], job['user'], job['start_human'], job['duration'], job['derived_ec'])
                except KeyError:
                    if 'derived_ec' not in job.keys():
                        self._cfg._logger.warn("Jobid '%(jobid)s' has no 'derived_ec'!?" % job)
                        payload = (jobid, job['jobname'], job['user'], job['start_human'], job['duration'], "?")
                    else:
                        self._cfg._logger.error("Jobid '%s' has something missing... %s!?" % (job['jobid'], ",".join(job.keys())))
                        payload = (jobid, "?", "?", "?", "?", "?")
                finished_jobs.append(payload)
            else:
                # running row: (id, name, user, start)
                try:
                    payload = (jobid, job['jobname'], job['user'], job['start_human'])
                except KeyError:
                    if 'jobname' not in job.keys():
                        self._cfg._logger.warn("Jobid '%(jobid)s' has no 'jobname" % job)
                        payload = (jobid, "?", job['user'], job['start_human'])
                    else:
                        self._cfg._logger.error("Jobid '%s' has something missing... %s!?" % (job['jobid'], ",".join(job.keys())))
                        payload = (jobid, "?", "?", "?")
                running_jobs.append(payload)
            # render a per-job dashboard unless it already exists
            if os.path.exists('/var/www/grafana/app/dashboards/slurm_%s.json' % jobid):
                logging.debug("Dashboard for jobid '%(jobid)s' / '%(jobname)s' already existing" % job)
                continue
            logging.info("Create Dashboard for job %(jobid)s '%(jobname)s'" % job)
            out = self._template['slurmjob'].render(**job)
            with open(self._target['slurmjob'] % job, "w") as fd:
                fd.write(out)
        # overview dashboard: all running jobs plus the last five finished
        out = self._template['slurm'].render({'running': sorted(running_jobs),
                                              'finished': reversed(sorted(finished_jobs)[-5:])})
        with open(self._target['slurm'], "w") as fd:
            fd.write(out)
def main():
    """Parse CLI options (when docopt is available) and start the dashboard."""
    cli_options = None
    if HAVE_DOCOPT:
        cli_options = docopt(__doc__, version='Test Script 0.1')
    config = QnibConfig(cli_options)
    SlurmDash(config).run()
if __name__ == "__main__":
    # script entry point
    main()
|
ChristianKniep/docker-grafana
|
opt/qnib/grafana/bin/dashboard.py
|
Python
|
mit
| 12,494
|
def missing_number():
    """Return the set differences between two pairs of sample lists.

    The original file was syntactically invalid: four `def` headers with no
    bodies, `list1-[1,2,3]` where an assignment (`=`) was intended, and a
    second `return` that could never execute. Rebuilt as one function that
    returns both differences.

    Returns
    -------
    tuple of (list, list)
        (elements of list1 not in list2, elements of list3 not in list4)
    """
    list1 = [1, 2, 3]
    list2 = [1, 2, 3, 4]
    list3 = [4, 66, 7]
    list4 = [66, 77, 7, 4]
    # NOTE(review): the original subtracted in this direction, which yields
    # empty lists for these samples; to find the "missing number" (4) the
    # subtraction would need to be set(list2) - set(list1).
    diff1 = list(set(list1) - set(list2))
    diff2 = list(set(list3) - set(list4))
    return diff1, diff2
|
evansmwangi/GitDay0
|
Day3labs/missingnumber.py
|
Python
|
gpl-3.0
| 362
|
"""Remove any personally identifying information from the database"""
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django_openid_auth.models import UserOpenID
from rest_framework.authtoken.models import Token
from reversion.models import Revision
from games.models import (
Installer,
InstallerIssue,
InstallerIssueReply,
InstallerHistory,
Screenshot,
GameSubmission,
)
from accounts.models import User
from common.models import Upload, News
class Command(BaseCommand):
    """Django command to anonymize the database.

    Wipes auth tokens and admin log entries, reassigns every user-owned
    record to the single surviving user, deletes all other users and
    resets the surviving account to well-known credentials.
    Refuses to run unless DEBUG is set, by design.
    """

    @staticmethod
    def get_main_user():
        """Return the only user remaining in the DB"""
        return User.objects.first()

    @staticmethod
    def delete_tokens():
        """Remove all auth tokens (OpenID, DRF, ...)"""
        res = UserOpenID.objects.all().delete()
        print("Deleted %s openids" % res[0])
        res = Token.objects.all().delete()
        print("Deleted %s tokens" % res[0])
        res = LogEntry.objects.all().delete()
        print("Deleted %s log entries" % res[0])

    def handle(self, *args, **kwargs):
        # Safety guard: anonymization must never run against production data.
        if not settings.DEBUG:
            raise RuntimeError("Never run this in production")
        self.delete_tokens()
        user = self.get_main_user()
        res = InstallerIssue.objects.all().update(submitted_by=user)
        print("Updated %s issues" % res)
        res = InstallerIssueReply.objects.all().update(submitted_by=user)
        print("Updated %s issue replies" % res)
        # FIX: the original ran this InstallerHistory update twice; once suffices.
        res = InstallerHistory.objects.all().update(user=user)
        print("Updated %s installer history" % res)
        res = Installer.objects.all().update(user=user)
        print("Updated %s installers" % res)
        res = GameSubmission.objects.all().update(user=user)
        print("Updated %s game submissions" % res)
        res = Screenshot.objects.all().update(uploaded_by=user)
        print("Updated %s screenshots" % res)
        res = Upload.objects.all().update(uploaded_by=user)
        print("Updated %s uploads" % res)
        res = News.objects.all().update(user=user)
        print("Updated %s news" % res)
        res = Revision.objects.all().update(user=user)
        print("Updated %s revisions" % res)
        res = User.objects.exclude(pk=user.id).delete()
        print("Deleted %s users" % res[0])
        # Reset the surviving account to well-known dev credentials.
        default_password = "lutris"
        user.set_password(default_password)
        user.username = "lutris"
        user.email = "root@localhost"
        user.website = ""
        user.steamid = ""
        user.save()
        print("Password for user %s is now %s" % (user, default_password))
|
lutris/website
|
common/management/commands/anon_db.py
|
Python
|
agpl-3.0
| 2,839
|
"""rename this to test_assumptions.py when the old assumptions system is deleted"""
from sympy.core import symbols
from sympy.assumptions import Assume, global_assumptions, Predicate
from sympy.assumptions.assume import eliminate_assume
from sympy.printing import pretty
from sympy.assumptions.ask import Q
from sympy.utilities.pytest import XFAIL
def test_assume():
    """Assume stores the wrapped expression and maps a string key to a Q predicate."""
    x = symbols('x')
    assumption = Assume(x, 'integer')
    assert assumption.expr == x
    assert assumption.key == Q.integer
def test_Predicate_wraps_Assume():
    """Calling a Predicate and constructing Assume directly are equivalent."""
    x = symbols('x')
    integer = Predicate('integer')
    for assump in (integer(x), Assume(x, integer)):
        assert (assump.expr, assump.key) == (x, integer)
def test_False():
    """An Assume built with value=False equals the negated assumption."""
    x = symbols('x')
    negated = Assume(x, 'integer', False)
    assert negated == ~Assume(x, 'integer')
def test_equal():
    """Assume equality takes the truth value into account."""
    x = symbols('x')
    positive = Assume(x, 'positive', True)
    negative = Assume(x, 'positive', False)
    assert positive == Assume(x, 'positive', True)
    assert positive != negative
    assert negative == Assume(x, 'positive', False)
@XFAIL #TODO: handle printing
def test_pretty():
    # Pretty-printing of Assume objects is not implemented yet, hence XFAIL.
    x = symbols('x')
    assert pretty(Assume(x, 'positive')) == "Assume(x, 'positive')"
def test_eliminate_assumptions():
    """eliminate_assume strips the expression, keeping (possibly negated) predicates."""
    a, b = map(Predicate, symbols('a,b'))
    x, y = symbols('x,y')
    assert eliminate_assume(Assume(x, a)) == a
    assert eliminate_assume(Assume(x, a), symbol=x) == a
    # Asking about a different symbol yields no information.
    # (The original repeated this assertion twice; the duplicate is removed.)
    assert eliminate_assume(Assume(x, a), symbol=y) is None
    assert eliminate_assume(Assume(x, a, False)) == ~a
    assert eliminate_assume(Assume(x, a) | Assume(x, b)) == a | b
    assert eliminate_assume(Assume(x, a) | Assume(x, b, False)) == a | ~b
def test_global():
    """Global assumptions support add/remove/clear and membership tests."""
    x, y = symbols('x,y')
    global_assumptions.add(Assume(x>0))
    assert Assume(x>0) in global_assumptions
    global_assumptions.remove(Assume(x>0))
    # Idiom fix: use the 'not in' operator instead of 'not x in'.
    assert Assume(x>0) not in global_assumptions
    # same with multiple assumptions
    global_assumptions.add(Assume(x>0), Assume(y>0))
    assert Assume(x>0) in global_assumptions
    assert Assume(y>0) in global_assumptions
    global_assumptions.clear()
    assert Assume(x>0) not in global_assumptions
    assert Assume(y>0) not in global_assumptions
|
pernici/sympy
|
sympy/assumptions/tests/test_assumptions_2.py
|
Python
|
bsd-3-clause
| 2,430
|
import logging
import os
import jieba
import Taiba
class Matcher(object):
    """
    Match a user-input sentence against a target corpus and
    return the most similar sentence found in the corpus.
    """
    def __init__(self, segLib="Taiba"):
        logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
        self.titles = [] # all titles to be matched against
        self.segTitles = [] # titles after word segmentation
        self.stopwords = set()
        self.similarity = 1.
        # Choose the segmentation backend: Taiba (default) or jieba.
        if segLib == "Taiba":
            self.useTaiba = True
        else:
            self.useTaiba = False
    def jiebaCustomSetting(self, dict_path, usr_dict_path):
        # Load a custom main dictionary and a user dictionary into jieba.
        jieba.set_dictionary(dict_path)
        with open(usr_dict_path, 'r', encoding='utf-8') as dic:
            for word in dic:
                jieba.add_word(word.strip('\n'))
    def TaibaCustomSetting(self, usr_dict):
        # Load a user dictionary (one word per line) into Taiba.
        with open(usr_dict, 'r', encoding='utf-8') as dic:
            for word in dic:
                Taiba.add_word(word.strip('\n'))
    def loadStopWords(self, path):
        # Read one stopword per line into self.stopwords.
        with open(path, 'r', encoding='utf-8') as sw:
            for word in sw:
                self.stopwords.add(word.strip('\n'))
    def loadTitles(self, path):
        # Read the corpus titles, one per line, into self.titles.
        with open(path,'r',encoding='utf-8') as data:
            self.titles = [line.strip('\n') for line in data]
    def match(self, query):
        """
        Scan the corpus for an exact copy of the user query.
        Args:
            - query: the user's input sentence
        Return: (title, index)
            - title: the matching title
            - index: that title's position in self.titles
        Returns None implicitly when no exact match exists.
        """
        result = None  # NOTE(review): unused; the loop returns directly.
        for index, title in enumerate(self.titles):
            if title == query:
                return title,index
    def getSimilarity(self):
        # Similarity score of the last match (initialized to 1.0).
        return self.similarity
    def wordSegmentation(self, string):
        # Dispatch to Taiba (CRF mode) or jieba (full mode) for segmentation.
        if self.useTaiba:
            return Taiba.lcut(string,CRF=True)
        else:
            return jieba.cut(string,cut_all=True)
    def TitlesSegmentation(self, cleanStopwords=False):
        """
        Segment every title in self.titles and store the result in
        self.segTitles; the result is cached in data/SegTitles.txt and
        reused on subsequent runs.
        Args:
            - cleanStopwords: whether to remove stopwords from the titles
        """
        logging.info("正準備將 titles 斷詞")
        count = 0
        if not os.path.exists('data/SegTitles.txt'):
            # No cache yet: segment from scratch and persist the result.
            self.segTitles = []
            for title in self.titles:
                if cleanStopwords:
                    clean = [word for word in self.wordSegmentation(title)
                             if word not in self.stopwords]
                    self.segTitles.append(clean)
                else:
                    self.segTitles.append(self.wordSegmentation(title))
                count += 1
                if count % 1000 == 0:
                    logging.info("已斷詞完前 %d 篇文章" % count)
            # Persist segmented titles, space-separated, one title per line.
            with open('data/SegTitles.txt','w',encoding="utf-8") as seg_title:
                for title in self.segTitles:
                    seg_title.write(' '.join(title) + '\n')
            logging.info("完成標題斷詞,結果已暫存至 data/SegTitles.txt")
        else:
            # Cache hit: reload the previous segmentation result.
            logging.info("偵測到先前的標題斷詞結果,讀取中...")
            with open('data/SegTitles.txt','r',encoding="utf-8") as seg_title:
                for line in seg_title:
                    line = line.strip('\n')
                    seg = line.split()
                    if cleanStopwords:
                        seg = [word for word in seg
                               if word not in self.stopwords]
                    self.segTitles.append(seg)
            logging.info("%d 個標題已完成載入" % len(self.segTitles))
|
nickbanana/chatbot-backend
|
Chatbot/QuestionAnswering/Matcher/matcher.py
|
Python
|
gpl-3.0
| 3,827
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetQuestionnaireResult:
    """Swagger model: result wrapper holding a single questionnaire.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Maps attribute names to their swagger type names for (de)serialization.
        self.swaggerTypes = {'questionnaire': 'QuestionnaireInfo'}
        # Populated later with a QuestionnaireInfo instance.
        self.questionnaire = None
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/GetQuestionnaireResult.py
|
Python
|
apache-2.0
| 958
|
# -*- coding: utf-8 -*-
# © 2016 Daniel Reis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
from openerp import SUPERUSER_ID
from openerp import tools
# Extended name search is only applied for these operators; all other
# operators (e.g. '=', 'not ilike') keep the standard name_search behavior.
ALLOWED_OPS = set(['ilike', 'like'])
@tools.ormcache(skiparg=0)
def _get_rec_names(self):
    """Return the list of field names to search: _rec_name first, then the
    extra fields configured on the ir.model record.

    Results are cached per model by @tools.ormcache.
    """
    model = self.env['ir.model'].search(
        [('model', '=', str(self._model))])
    # FIX: the original wrote `[self._rec_name] or []`, which is always the
    # one-element list (a non-empty list is truthy) and so yielded [None]
    # for models without a _rec_name. Only include _rec_name when it is set.
    rec_name = [self._rec_name] if self._rec_name else []
    other_names = model.name_search_ids.mapped('name')
    return rec_name + other_names
def _extend_name_results(self, domain, results, limit):
result_count = len(results)
if result_count < limit:
domain += [('id', 'not in', [x[0] for x in results])]
recs = self.search(domain, limit=limit - result_count)
results.extend(recs.name_get())
return results
class ModelExtended(models.Model):
    """Extends ir.model so each model can declare extra fields that
    name_search should also match against."""
    _inherit = 'ir.model'
    # Extra fields (per model) used by the extended name search.
    name_search_ids = fields.Many2many(
        'ir.model.fields',
        string='Name Search Fields')
    def _register_hook(self, cr, ids=None):
        # Monkey-patch name_search on every registered model so searches
        # also try the configured extra fields and word permutations.
        def make_name_search():
            @api.model
            def name_search(self, name='', args=None,
                            operator='ilike', limit=100):
                # Perform standard name search
                res = name_search.origin(
                    self, name=name, args=args, operator=operator, limit=limit)
                # The extension can be disabled via this context key.
                enabled = self.env.context.get('name_search_extended', True)
                # Perform extended name search
                if enabled and operator in ALLOWED_OPS:
                    # Support a list of fields to search on
                    all_names = _get_rec_names(self)
                    # Try regular search on each additional search field
                    for rec_name in all_names[1:]:
                        domain = [(rec_name, operator, name)]
                        res = _extend_name_results(self, domain, res, limit)
                    # Try ordered word search on each of the search fields
                    for rec_name in all_names:
                        domain = [(rec_name, operator, name.replace(' ', '%'))]
                        res = _extend_name_results(self, domain, res, limit)
                    # Try unordered word search on each of the search fields
                    for rec_name in all_names:
                        domain = [(rec_name, operator, x)
                                  for x in name.split() if x]
                        res = _extend_name_results(self, domain, res, limit)
                return res
            return name_search
        # Patch every model in the registry (or only the given ids).
        if ids is None:
            ids = self.search(cr, SUPERUSER_ID, [])
        for model in self.browse(cr, SUPERUSER_ID, ids):
            Model = self.pool.get(model.model)
            if Model:
                Model._patch_method('name_search', make_name_search())
        return super(ModelExtended, self)._register_hook(cr)
|
Benniphx/server-tools
|
base_name_search_improved/models/ir_model.py
|
Python
|
agpl-3.0
| 2,973
|
#!/usr/bin/env python
# pylint: disable=C0103,W0622
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <leandrotoeldodesouza@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram User"""
from telegram import TelegramObject
class User(TelegramObject):
    """This object represents a Telegram User.

    Attributes:
        id (int):
        first_name (str):
        last_name (str):
        username (str):

    Args:
        id (int):
        first_name (str):
        **kwargs: Arbitrary keyword arguments.

    Keyword Args:
        last_name (Optional[str]):
        username (Optional[str]):
    """

    def __init__(self, id, first_name, **kwargs):
        # Required fields
        self.id = int(id)
        self.first_name = first_name
        # Optional fields default to the empty string
        self.last_name = kwargs.get('last_name', '')
        self.username = kwargs.get('username', '')

    @property
    def name(self):
        """str: '@username' if set, else 'first last' if last_name, else first_name."""
        if self.username:
            return '@%s' % self.username
        if self.last_name:
            return '%s %s' % (self.first_name, self.last_name)
        return self.first_name

    @staticmethod
    def de_json(data):
        """
        Args:
            data (str):

        Returns:
            telegram.User: or None when *data* is empty.
        """
        return User(**data) if data else None
|
AndrewSamokhvalov/python-telegram-bot
|
telegram/user.py
|
Python
|
gpl-3.0
| 2,099
|
from flask import Flask, request, send_from_directory
from time import sleep
# Minimal demo server for the feedback widget; all assets live in the
# project root.
app = Flask(__name__, static_url_path='/public')
@app.route('/')
def root():
    # Demo page.
    return send_from_directory('./', 'index.html')
@app.route('/feedback.js')
def script():
    # Widget script.
    return send_from_directory('./', 'feedback.js')
@app.route('/feedback.css')
def style():
    # Widget stylesheet.
    return send_from_directory('./', 'feedback.css')
@app.route('/icons.png')
def icons():
    # Widget icon sprite.
    return send_from_directory('./', 'icons.png')
@app.route('/listener', methods=['GET', 'POST'])
def listener():
    # Blocks for 30 seconds before acknowledging with '1' — presumably to
    # exercise the widget's slow-server handling; confirm intent.
    sleep(30)
    return '1'
if __name__ == '__main__':
    app.run()
|
deviantintegral/feedback
|
src/listener.py
|
Python
|
mit
| 622
|
#!/usr/bin/python2
import optparse
import os
import shutil
import stat
import subprocess
import sys
from builds.GpBuild import GpBuild
def install_gpdb(dependency_name):
    """Unpack the GPDB tarball found under *dependency_name* into /usr/local/gpdb.

    Returns the first non-zero shell exit status, or 0 on success.
    """
    status = subprocess.call("mkdir -p /usr/local/gpdb", shell=True)
    if status:
        return status
    untar_cmd = "tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb"
    # shell=True is required so the shell expands the *.tar.gz glob.
    return subprocess.call(untar_cmd, shell=True)
def create_gpadmin_user():
    # Provision the gpadmin OS user via the bundled setup script; returns the
    # script's exit status when non-zero, otherwise None (falsy = success).
    status = subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
    # Give /bin/ping the setuid bit so the unprivileged gpadmin user can run it.
    # NOTE(review): this runs even when the setup script failed — confirm intended.
    os.chmod('/bin/ping', os.stat('/bin/ping').st_mode | stat.S_ISUID)
    if status:
        return status
def copy_output():
    # Echo every regression.diffs found in the source tree to the task log,
    # then preserve the canonical regress output as CI artifacts.
    # (Python 2 file: note the print statements.)
    for dirpath, dirs, diff_files in os.walk('gpdb_src/'):
        if 'regression.diffs' in diff_files:
            diff_file = dirpath + '/' + 'regression.diffs'
            print( "======================================================================\n" +
                   "DIFF FILE: " + diff_file+"\n" +
                   "----------------------------------------------------------------------")
            with open(diff_file, 'r') as fin:
                print fin.read()
    shutil.copyfile("gpdb_src/src/test/regress/regression.diffs", "icg_output/regression.diffs")
    shutil.copyfile("gpdb_src/src/test/regress/regression.out", "icg_output/regression.out")
def configure():
    """Run GPDB's ./configure against the libraries unpacked in /usr/local/gpdb.

    Returns the configure exit status (0 on success).
    """
    p_env = os.environ.copy()
    # Point the toolchain at the pre-installed GPDB headers and libraries.
    p_env['LD_LIBRARY_PATH'] = '/usr/local/gpdb/lib'
    p_env['CFLAGS'] = '-I/usr/local/gpdb/include'
    p_env['CPPFLAGS'] = '-I/usr/local/gpdb/include'
    p_env['LDFLAGS'] = '-L/usr/local/gpdb/lib'
    # FIX: the original passed this list with shell=True; on POSIX that hands
    # the extra items to the shell instead of ./configure, silently dropping
    # every option. Running the argument list directly forwards them.
    return subprocess.call(["./configure",
                            "--enable-mapreduce",
                            "--with-gssapi",
                            "--with-perl",
                            "--with-libxml",
                            "--with-python",
                            "--with-libs=/usr/local/gpdb/lib",
                            "--with-includes=/usr/local/gpdb/include",
                            "--prefix=/usr/local/gpdb"], env=p_env, cwd="gpdb_src")
def main():
    # Parse the pipeline options passed in by the Concourse task.
    parser = optparse.OptionParser()
    parser.add_option("--build_type", dest="build_type", default="RELEASE")
    parser.add_option("--mode", choices=['orca', 'planner'])
    parser.add_option("--compiler", dest="compiler")
    parser.add_option("--cxxflags", dest="cxxflags")
    parser.add_option("--output_dir", dest="output_dir", default="install")
    parser.add_option("--gpdb_name", dest="gpdb_name")
    (options, args) = parser.parse_args()
    gp_build = GpBuild(options.mode)
    # Each step returns a non-zero status on failure; bail out immediately.
    status = install_gpdb(options.gpdb_name)
    if status:
        return status
    status = configure()
    if status:
        return status
    status = create_gpadmin_user()
    if status:
        return status
    status = gp_build.unit_test()
    if status:
        return status
    # 'icw' selects the full install-check-world suite; anything else the default.
    if os.getenv("TEST_SUITE", "icg") == 'icw':
        status = gp_build.install_check('world')
    else:
        status = gp_build.install_check()
    if status:
        # Surface regression diffs in the task output before failing.
        copy_output()
    return status
if __name__ == "__main__":
    sys.exit(main())
|
Chibin/gpdb
|
concourse/scripts/test_gpdb.py
|
Python
|
apache-2.0
| 3,133
|
import sys, os
import shutil
# Destination inside the pymunk tree for the refreshed Chipmunk sources.
pymunk_src_path = "../chipmunk_src"
# Chipmunk checkout location: first CLI argument, or prompt interactively.
# (Python 2 file: raw_input.)
if len(sys.argv) > 1:
    chipmunk_svn_path = sys.argv[1]
else:
    chipmunk_svn_path = raw_input('Enter path to chipmunk source')
def copyfiles(basepath, subpath=""):
    """Recursively copy .c/.h files under basepath/subpath into
    pymunk_src_path, mirroring the directory layout.

    Hidden directories (names starting with '.') are skipped.
    """
    src_dir = os.path.join(basepath, subpath)
    for entry in os.listdir(src_dir):
        entry_path = os.path.join(src_dir, entry)
        if os.path.isfile(entry_path) and entry[-2:] in (".c", ".h"):
            shutil.copy(entry_path, os.path.join(pymunk_src_path, subpath))
        elif os.path.isdir(entry_path) and entry[0] != ".":
            copyfiles(basepath, os.path.join(subpath, entry))
# Copy the C sources and the public headers into the pymunk tree.
copyfiles(os.path.join(chipmunk_svn_path, "src"))
copyfiles(chipmunk_svn_path, "include")
# Remove any stale object files left inside the destination tree.
for (dirpath, x, fns) in os.walk(pymunk_src_path):
    for fn in fns:
        fpath = os.path.join(dirpath, fn)
        if os.path.isfile(fpath) and fn[-2:] == ".o":
            os.remove(fpath)
print "Remember to update svn version string of chipmunk!"
|
cfobel/python___pymunk
|
tools/update_chipmunk_src.py
|
Python
|
mit
| 1,037
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import inspect
from sqlalchemy.orm import backref, relationship
from a10_neutron_lbaas.db import model_base as models
LOG = logging.getLogger(__name__)
class A10ScalingGroup(models.A10Base):
    """A10 Scaling Group - container of switch and workers"""
    __tablename__ = u'a10_scaling_groups'
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # Optional link to the policy governing this group.
    scaling_policy_id = sa.Column(sa.String(36),
                                  sa.ForeignKey('a10_scaling_policies.id'),
                                  nullable=True)
    scaling_policy = relationship('A10ScalingPolicy', backref='scaling_groups')
    switches = relationship('A10ScalingGroupSwitch')
    workers = relationship('A10ScalingGroupWorker')
    members = relationship('A10ScalingGroupMember', backref='scaling_group')
    __mapper_args__ = {
        'polymorphic_identity': __tablename__
    }

class A10ScalingGroupBinding(models.A10Base):
    """Associates one LBaaS load balancer with a scaling group."""
    __tablename__ = u'a10_scaling_group_bindings'
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   nullable=False,
                   default=models._uuid_str)
    scaling_group_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('a10_scaling_groups.id'),
                                 nullable=False)
    scaling_group = relationship(A10ScalingGroup, backref='bindings')
    # unique=True: a load balancer may be bound to at most one scaling group.
    lbaas_loadbalancer_id = sa.Column(sa.String(36),
                                      unique=True,
                                      nullable=False)

class A10ScalingGroupMember(models.A10Base):
    """A10 Scaling Group Member - switch/worker depending on 'role'"""
    __tablename__ = "a10_scaling_group_members"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    # Polymorphic discriminator (holds the subtype's table name).
    type = sa.Column(sa.String(50), nullable=False)
    scaling_group_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('a10_scaling_groups.id'),
                                 nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # Connection details for the member's management API.
    host = sa.Column(sa.String(255), nullable=False)
    api_version = sa.Column(sa.String(12), nullable=False)
    username = sa.Column(sa.String(255), nullable=False)
    password = sa.Column(sa.String(255), nullable=False)
    protocol = sa.Column(sa.String(255), nullable=False)
    port = sa.Column(sa.Integer, nullable=False)
    nova_instance_id = sa.Column(sa.String(36), nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
        'polymorphic_on': type
    }
    def add_virtual_server(self, neutron_id, **kwargs):
        # Create a virtual-server row and attach it to this member.
        vs = A10ScalingGroupMemberVirtualServer.create(
            neutron_id=neutron_id,
            **kwargs)
        self.virtual_servers.append(vs)
        return vs
    def get_virtual_server(self, neutron_id):
        # Look up this member's virtual server by neutron id (None if absent).
        return inspect(self).session.\
            query(A10ScalingGroupMemberVirtualServer).\
            filter_by(member_id=self.id, neutron_id=neutron_id).\
            first()
    def delete_virtual_server(self, neutron_id):
        # Delete the matching virtual server, if any (no-op otherwise).
        vs = self.get_virtual_server(neutron_id)
        if vs:
            inspect(self).session.delete(vs)
class A10ScalingGroupWorker(A10ScalingGroupMember):
    """Joined-table subtype of member: a worker."""
    __tablename__ = "a10_scaling_group_workers"
    id = sa.Column(sa.String(36),
                   sa.ForeignKey(u'a10_scaling_group_members.id'),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
    }

class A10ScalingGroupSwitch(A10ScalingGroupMember):
    """Joined-table subtype of member: a switch."""
    __tablename__ = "a10_scaling_group_switches"
    id = sa.Column(sa.String(36),
                   sa.ForeignKey(u'a10_scaling_group_members.id'),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
    }
class A10ScalingGroupMemberVirtualServer(models.A10Base):
    """Virtual server record attached to a scaling-group member."""
    __tablename__ = "a10_scaling_group_member_virtual_servers"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    member_id = sa.Column(sa.String(36),
                          sa.ForeignKey(u'a10_scaling_group_members.id'),
                          nullable=False)
    # Deleting a member (or detaching a virtual server) removes its rows.
    member = relationship('A10ScalingGroupMember',
                          backref=backref('virtual_servers', cascade='all, delete-orphan'))
    neutron_id = sa.Column(sa.String(36),
                           nullable=False)
    ip_address = sa.Column(sa.String(50), nullable=False)
    interface_ip_address = sa.Column(sa.String(50), nullable=True)
    sflow_uuid = sa.Column(sa.String(36), nullable=False)
    def add_port(self, port, **kwargs):
        # Create a port row and attach it to this virtual server.
        vs = A10ScalingGroupMemberVirtualServerPort.create(
            port=port,
            **kwargs)
        self.ports.append(vs)
        return vs
    def get_port(self, port):
        # Look up this virtual server's port row by number (None if absent).
        return inspect(self).session.\
            query(A10ScalingGroupMemberVirtualServerPort).\
            filter_by(virtual_server_id=self.id, port=port).\
            first()
    def delete_port(self, port):
        # Delete the matching port row, if any (no-op otherwise).
        port = self.get_port(port)
        if port:
            inspect(self).session.delete(port)

class A10ScalingGroupMemberVirtualServerPort(models.A10Base):
    """Single port entry (port number + protocol) of a virtual server."""
    __tablename__ = "a10_scaling_group_member_virtual_server_ports"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    virtual_server_id = sa.Column(sa.String(36),
                                  sa.ForeignKey(u'a10_scaling_group_member_virtual_servers.id'),
                                  nullable=False)
    virtual_server = relationship('A10ScalingGroupMemberVirtualServer',
                                  backref=backref('ports', cascade='all, delete-orphan'))
    port = sa.Column(sa.Integer,
                     nullable=False)
    protocol = sa.Column(sa.String(255), nullable=False)
    sflow_uuid = sa.Column(sa.String(36), nullable=False)
class A10ScalingPolicy(models.A10Base):
    """Scaling policy: instance bounds, cooldown, ordered reaction list."""
    __tablename__ = "a10_scaling_policies"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    cooldown = sa.Column(sa.Integer, nullable=False)
    min_instances = sa.Column(sa.Integer, nullable=False)
    max_instances = sa.Column(sa.Integer, nullable=True)
    # ordering_list keeps 'position' contiguous as reactions are reordered.
    reactions = relationship('A10ScalingPolicyReaction',
                             order_by="A10ScalingPolicyReaction.position",
                             collection_class=ordering_list('position'),
                             backref='policy')
    def scaling_group_ids(self):
        # Ids of every scaling group governed by this policy.
        return [sg.id for sg in self.scaling_groups]

class A10ScalingPolicyReaction(models.A10Base):
    """Ordered (alarm, action) pair within a scaling policy."""
    __tablename__ = "a10_scaling_policy_reactions"
    # A surrogate key is required by ordering_list
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    scaling_policy_id = sa.Column(sa.String(36),
                                  sa.ForeignKey('a10_scaling_policies.id'),
                                  nullable=False)
    position = sa.Column(sa.Integer,
                         nullable=False)
    alarm_id = sa.Column(sa.String(36),
                         sa.ForeignKey('a10_scaling_alarms.id'),
                         nullable=False)
    action_id = sa.Column(sa.String(36),
                          sa.ForeignKey('a10_scaling_actions.id'),
                          nullable=False)
    alarm = relationship('A10ScalingAlarm', backref='reactions')
    action = relationship('A10ScalingAction', backref='reactions')

class A10ScalingAlarm(models.A10Base):
    """Alarm definition (aggregation/measurement/operator/threshold/period)."""
    __tablename__ = "a10_scaling_alarms"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    aggregation = sa.Column(sa.String(50), nullable=False)
    measurement = sa.Column(sa.String(50), nullable=False)
    operator = sa.Column(sa.String(50), nullable=False)
    threshold = sa.Column(sa.Float(), nullable=False)
    unit = sa.Column(sa.String(50), nullable=False)
    period = sa.Column(sa.Integer, nullable=False)
    period_unit = sa.Column(sa.String(50), nullable=False)
    def scaling_group_ids(self):
        # Ids of all groups reachable through any reaction using this alarm.
        return set(x
                   for reaction in self.reactions
                   for x in reaction.policy.scaling_group_ids())

class A10ScalingAction(models.A10Base):
    """Named scaling action with an optional amount."""
    __tablename__ = "a10_scaling_actions"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    action = sa.Column(sa.String(50), nullable=False)
    amount = sa.Column(sa.Integer)
    def scaling_group_ids(self):
        # Ids of all groups reachable through any reaction using this action.
        return set(x
                   for reaction in self.reactions
                   for x in reaction.policy.scaling_group_ids())
|
dougwig/a10-neutron-lbaas
|
a10_neutron_lbaas/db/models/scaling_group.py
|
Python
|
apache-2.0
| 10,831
|
import datetime
from django import forms
from .models import Profile, Event
class EventForm(forms.Form):
    """Form selecting an event whose date is today or earlier.

    FIX: the queryset used to be built once at class-definition (import)
    time, so ``datetime.datetime.today()`` went stale in long-running
    processes. Refreshing it in ``__init__`` evaluates "today" on every
    form instantiation.
    """
    event = forms.ModelChoiceField(queryset=Event.objects.none())

    def __init__(self, *args, **kwargs):
        super(EventForm, self).__init__(*args, **kwargs)
        self.fields['event'].queryset = Event.objects.filter(
            event_date__lte=datetime.datetime.today())
class ProfileForm(forms.ModelForm):
    """ModelForm for Profile with all field help texts suppressed."""

    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        # Blank out every field's help_text so templates render it empty.
        for fieldname in self.fields:
            self.fields[fieldname].help_text = None

    class Meta:
        model = Profile
|
endthestart/photocontest
|
photocontest/photocontest/forms.py
|
Python
|
mit
| 486
|
from pylab import *
import numpy as np
# Analytic model: columns are radius, circular velocity, velocity dispersion.
data = loadtxt("analy")
r = data[:,0]
# Rescale radius to units of the core radius R_c; 1.543e20 m matches the
# axis label "R_c = 5 Kpc" below.
r = r /(1.543*10.0**20.0)
vc = data[:,1]
# NOTE(review): vc and sigma2 are only used by the commented-out plot below.
sigma2 = data[:,2]
# Numerical anisotropic model (beta = 0.5), second column only.
data = loadtxt("ai05")
ai = data[:,1]
# Numerical model including a black hole (10%), second column only.
data = loadtxt("bh")
bh = data[:,1]
plot(np.log10((r)), np.log10(ai), label="Numerical beta = 0.5")
plot(np.log10(r),np.log10(bh), label ="Numerical with black hole 10%")
#plot(np.log10(r), (np.log10(sigma2)-np.log10(vc)), label = "Difference between black hole and no black hole")
xlabel('log10 Radius (Log10(R_c)) R_c = 5 Kpc')
ylabel('log10 Velocity dispersion (log10(m/s))')
title('Comparison of anisotropic to black hole')
xlim([-2,2])
legend(loc = 3)
show()
|
ZacByrne/galaxy-sim
|
plotaibh.py
|
Python
|
gpl-3.0
| 645
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = 'Bruno Adelé <bruno@adele.im>'
__copyright__ = 'Copyright (C) 2015 Bruno Adelé'
__description__ = """Unittest"""
__license__ = 'GPLv3'
import os
import sys
import shutil
from cStringIO import StringIO
import unittest
import git
from gitcheck import gitcheck
GITROOT = '/tmp/gitcheck-unittest'
def setUpModule():
    # Clone the fixture repositories the tests operate on.
    get_github_projects("gitcheck", "https://github.com/badele/gitcheck.git")
    get_github_projects("serialkiller", "https://github.com/badele/serialkiller.git")
    get_github_projects("fabrecipes", "https://github.com/badele/fabrecipes.git")
    # Create empty repository
    git.Git().init('%s/%s' % (GITROOT, 'zempty'))
def get_github_projects(projectname, projecturl):
    # Preparing the git directory: wipe any previous checkout so the clone
    # starts from a clean slate. (Python 2 file: note the print statement.)
    gitdir = "%s/%s" % (GITROOT, projectname)
    if os.path.exists(gitdir):
        shutil.rmtree(gitdir)
    os.makedirs(gitdir)
    os.chdir(GITROOT)
    # Get a git projects
    print "Get git %s project" % projectname
    git.Git().clone(projecturl)
class TestPackages(unittest.TestCase):
    """Integration tests; the repositories are prepared by setUpModule()."""
    def setUp(self):
        # Redirect stdout so gitcheck's console output can be captured.
        self.output = StringIO()
        self.saved_stdout = sys.stdout
        sys.stdout = self.output
    def tearDown(self):
        # Reset stdout
        self.output.close()
        sys.stdout = self.saved_stdout
    def test_searchRepositories(self):
        # Repositories are expected in deterministic (alphabetical) order.
        os.chdir(GITROOT)
        repos = gitcheck.searchRepositories()
        self.assertEqual(repos[0], '%s/fabrecipes' % GITROOT)
        self.assertEqual(repos[1], '%s/gitcheck' % GITROOT)
        self.assertEqual(repos[2], '%s/serialkiller' % GITROOT)
        self.assertEqual(repos[3], '%s/zempty' % GITROOT)
    def test_gitcheck(self):
        os.chdir(GITROOT)
        # Disable all color codes so the captured output is plain text.
        defaulttheme = ""
        gitcheck.colortheme = {
            'default': defaulttheme,
            'prjchanged': defaulttheme,
            'prjremote': defaulttheme,
            'prjname': defaulttheme,
            'reponame': defaulttheme,
            'branchname': defaulttheme,
            'fileupdated': defaulttheme,
            'remoteto': defaulttheme,
            'committo': defaulttheme,
            'commitinfo': defaulttheme,
            'commitstate': defaulttheme,
            'bell': defaulttheme,
            'reset': defaulttheme,
        }
        gitcheck.gitcheck()
        # One output line per repository/branch is expected.
        output = sys.stdout.getvalue().strip()
        lines = output.split('\n')
        self.assertEqual(lines[0], 'fabrecipes/master ')
        self.assertEqual(lines[1], 'gitcheck/master ')
        self.assertEqual(lines[2], 'serialkiller/master ')
        self.assertEqual(lines[3], 'zempty/master')
if __name__ == "__main__":
    # Run the suite with verbose output when executed directly.
    unittest.main(verbosity=2)
|
tmartinfr/gitcheck
|
tests.py
|
Python
|
gpl-3.0
| 2,753
|
# -*- coding: utf-8 -*-
#
# nimblenet documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 14 08:11:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
]

# Add any paths that contain templates here, relative to this directory.
# NOTE(review): sphinx-quickstart generates '_templates' by default; confirm
# that 'ntemplates' is intentional and that the directory exists.
templates_path = ['ntemplates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nimblenet'
copyright = u'2016, Jørgen Grimnes'
author = u'Jørgen Grimnes'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2'
# The full version, including alpha/beta/rc tags.
release = u'0.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'nimblenet v0.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'nimblenetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nimblenet.tex', u'nimblenet Documentation',
u'Jørgen Grimnes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nimblenet', u'nimblenet Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nimblenet', u'nimblenet Documentation',
author, 'nimblenet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
jorgenkg/python-neural-network
|
docs/source/conf.py
|
Python
|
bsd-2-clause
| 9,824
|
import ckan.plugins.toolkit as toolkit
import ckan.lib.helpers as helpers
import ckan.plugins as p
from ckan.lib.base import BaseController, response, request
import json
import db
import Levenshtein
from unidecode import unidecode
# Convenience aliases for the CKAN toolkit template context and renderer.
c = p.toolkit.c
render = p.toolkit.render
class TagmanagerController(BaseController):
    """Controller behind the tag-manager pages.

    Renders the tag-management templates and implements tag deletion,
    tag merging (manual or from stored suggestions) and the computation
    and persistence of merge suggestions.
    """

    def index(self):
        """Landing page: form to load datasets and manage their tag relations."""
        return render('tagmanager/index.html')

    def edit(self):
        return render('tagmanager/edit.html')

    def index_process_suggestions(self):
        return render('tagmanager/index_process_suggestions.html')

    def merge_0(self):
        return render('tagmanager/index_merge_0.html')

    def merge_1(self):
        return render('tagmanager/index_merge_1.html')

    def merge_2(self):
        return render('tagmanager/index_merge_2.html')

    def merge_form(self):
        return render('tagmanager/merge_form.html')

    def merge_confirm(self):
        return render('tagmanager/merge_confirm.html')

    def delete_confirm(self):
        return render('tagmanager/delete_confirm.html')

    def delete(self):
        """Delete the tag named in the POSTed ``tag`` field."""
        p.toolkit.get_action('tag_delete')({}, {'id': request.POST['tag']})
        return render('tagmanager/index.html')

    def merge(self):
        "assign all elements tagged with tag2 with tag1; delete tag2"
        tag2_datasets = p.toolkit.get_action('tag_show')(
            {}, {'id': request.POST['tag2'], 'include_datasets': True})
        for ds in tag2_datasets['packages']:
            dataset = p.toolkit.get_action('package_show')({}, {'id': ds['id']})
            dataset['tags'].append(
                p.toolkit.get_action('tag_show')({}, {'id': request.POST['tag1']}))
            p.toolkit.get_action('package_update')({}, dataset)
        p.toolkit.get_action('tag_delete')({}, {'id': request.POST['tag2']})
        return render('tagmanager/index.html')

    def merge_do(self):
        """Apply every merge suggestion selected in the submitted form."""
        merges = request.POST.getall('merge')
        for merge_id in merges:
            self.merge_from_suggestion(merge_id, request.POST["select_" + merge_id])
        return render('tagmanager/index.html')

    def merge_from_suggestion(self, merge_id, tag_maintain):
        """Resolve a stored TagMergeSuggestion: keep ``tag_maintain`` and
        retag every dataset of the other tag before deleting it."""
        merge_object = db.TagMergeSuggestion.by_id(merge_id)
        if merge_object.tag_id_1 == tag_maintain:
            tag_delete = merge_object.tag_id_2
        else:
            tag_delete = merge_object.tag_id_1
        tag_delete_datasets = p.toolkit.get_action('tag_show')(
            {}, {'id': tag_delete, 'include_datasets': True})
        for ds in tag_delete_datasets['packages']:
            dataset = p.toolkit.get_action('package_show')({}, {'id': ds['id']})
            dataset['tags'].append(
                p.toolkit.get_action('tag_show')({}, {'id': tag_maintain}))
            p.toolkit.get_action('package_update')({}, dataset)
        # Drop every stored suggestion that still references the removed tag.
        merge_objects = db.TagMergeSuggestion.by_tag_id(tag_delete)
        for m in merge_objects:
            m.delete()
            m.commit()
        p.toolkit.get_action('tag_delete')({}, {'id': tag_delete})
        return

    def save_merge_suggestions(self, suggestion_type='all'):
        """Compute merge suggestions and persist them as TagMergeSuggestion rows.

        The actual suggestion type is read from the POSTed ``method`` field
        (the parameter default is kept for interface compatibility).
        """
        suggestion_type = request.POST['method']
        if suggestion_type == 'all':
            # BUG FIX: get_merge_suggestions() compares against the strings
            # '0'/'1'/'2', so each pass must be requested with the string
            # form of its type (the old code passed ints and saved nothing).
            # BUG FIX: the third pass previously read
            # ``session.db.TagMergeSuggestion(...)`` which raised
            # AttributeError instead of creating the row.
            for type_code in ('0', '1', '2'):
                suggestions = self.get_merge_suggestions(type_code)
                for s in suggestions:
                    session = db.TagMergeSuggestion(s[0], s[1], int(type_code))
                    session.save()
        else:
            suggestions = self.get_merge_suggestions(suggestion_type)
            for s in suggestions:
                session = db.TagMergeSuggestion(s[0], s[1], suggestion_type)
                session.save()
        return render('tagmanager/index.html')

    def get_merge_suggestions(self, suggestion_type=0, limit=None):
        """Return candidate tag pairs ``[id_a, id_b]`` worth merging.

        Types: '0' case/accent-insensitive duplicates, '1' small
        Levenshtein distance, '2' WordNet synonymy; 'all' runs every pass.
        ``limit`` is currently unused (kept for interface compatibility).
        """
        from nltk.corpus import wordnet as wn
        print(suggestion_type)
        tags = p.toolkit.get_action('tag_list')({}, {'all_fields': True})
        T = len(tags)
        merge_list = []
        # NOTE(review): the ranges below stop at T-1 and therefore never
        # consider the final tag in the list -- kept as-is to preserve the
        # existing behaviour; confirm whether this is intentional.
        for t in range(0, T - 1):
            if (suggestion_type == '0') or (suggestion_type == 'all'):
                # Exact duplicates once case and accents are normalised.
                for s in range(t + 1, T - 1):
                    if unidecode(tags[s]['name'].lower()) == unidecode(tags[t]['name'].lower()):
                        merge_list.append([tags[s]['id'], tags[t]['id']])
            if (suggestion_type == '1') or (suggestion_type == 'all'):
                stri = tags[t]['name']
                # Only consider names longer than 3 chars with no digits.
                if len(stri) > 3 and not any(ch.isdigit() for ch in stri):
                    for s in range(t, T - 1):
                        strj = tags[s]['name']
                        if len(strj) > 3 and not any(ch.isdigit() for ch in strj):
                            if unidecode(tags[s]['name'].lower()) != unidecode(tags[t]['name'].lower()):
                                d = Levenshtein.distance(tags[t]['name'], tags[s]['name'])
                                if d < 3:
                                    merge_list.append([tags[s]['id'], tags[t]['id']])
            if (suggestion_type == '2') or (suggestion_type == 'all'):
                stri = tags[t]['name']
                if len(stri) > 3 and not any(ch.isdigit() for ch in stri):
                    syn1 = wn.synsets(stri)
                    if syn1 != []:
                        for s in range(t, T - 1):
                            strj = tags[s]['name']
                            if len(strj) > 3 and not any(ch.isdigit() for ch in strj):
                                syn2 = wn.synsets(strj)
                                if syn2 != []:
                                    if unidecode(tags[s]['name'].lower()) != unidecode(tags[t]['name'].lower()):
                                        if Levenshtein.distance(tags[t]['name'], tags[s]['name']) > 3:
                                            # Merge only near-perfect WordNet similarity.
                                            b = max(syn2[i].wup_similarity(syn1[0]) for i in range(len(syn2)))
                                            if b >= 1:
                                                merge_list.append([tags[s]['id'], tags[t]['id']])
        return merge_list
|
alantygel/ckanext-tagmanager
|
ckanext/tagmanager/controller.py
|
Python
|
agpl-3.0
| 5,591
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la configuration par défaut du module 'format'"""
cfg_charte = r"""
# Ce fichier contient la configuration du module primaire format.
# Il paramètre la "charte graphique" du moteur (raccourcis de formatage et
# quelques options).
# /!\ Ces valeurs sont indispensables au bon fonctionnement du moteur !
# Les couleurs disponibles :
# |nr| - noir
# |rg| - rouge
# |vr| - vert
# |mr| - marron
# |bl| - bleu
# |mg| - magenta
# |cy| - cyan
# |gr| - gris
# |grf| - gris foncé
# |rgc| - rouge clair
# |vrc| - vert clair
# |jn| - jaune
# |blc| - bleu clair
# |mgc| - magenta clair
# |cyc| - cyan clair
# |bc| - blanc
## Raccourcis de formatage
# Ces raccourcis doivent être utilisées tels quels dans un message envoyé au
# client, suivis de |ff| pour arrêter la coloration. Par exemple :
# 'Entrez |cmd|/|ff| pour vous identifier.'
# Couleur des titres
# Couleur utilisée principalement sur les titres des contextes de création.
# Raccourci correspondant : |tit|
couleur_titre = "|mr|"
# Couleur des commandes
# Les commandes à entrer telles quelles, sont signalées au client par la
# couleur paramétrée ci-dessous.
# Raccourci correspondant : |cmd|
couleur_cmd = "|grf|"
# Couleur des entrées
# Contrairement au raccourci précédent, celui-ci correspond indications de
# valeurs à entrer par le client. Par exemple :
# 'Entrez |ent|votre mot de passe|ff| pour vous connecter."
# Par défaut, la couleur est la même, à vous de la changer si vous souhaitez
# établir une distinction.
# Raccourci correspondant : |ent|
couleur_entree = "|grf|"
# Couleur des messages importants
# Deux niveaux de messages, les messages de warning et les messages d'erreur.
# Raccourcis correspondantes : |att| et |err|
couleur_attention = "|vr|"
couleur_erreur = "|rg|"
# Si vous voulez ajouter des raccourcis de mise en forme, complétez ce fichier
# (sans oublier de documenter) et le dico dans primaires.format.fonctions.
# Ensuite yapuka utiliser vos raccourcis tout neufs dans un quelconque contexte.
## Options de mise en forme
# Couleur du prompt
# Ceci est la couleur du prompt, surtout utilisée lors de l'inscription.
couleur_prompt = "|cy|"
# Préfixe du prompt
# Ce préfixe est placé devant le prompt, surtout lors de l'inscription.
prefixe_prompt = "* "
"""
|
stormi/tsunami
|
src/primaires/format/config.py
|
Python
|
bsd-3-clause
| 3,909
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import logging
from django.contrib.auth import get_user_model
from django.db.transaction import atomic
from django.utils.translation import gettext_lazy as _
from typing import TYPE_CHECKING
from shuup.core.models import Shop, Supplier
from shuup.core.tasks import TaskResult
from shuup.importer.exceptions import ImporterError
from shuup.importer.utils.importer import FileImporter, ImportMode
if TYPE_CHECKING: # pragma: no cover
from shuup.importer.importing import DataImporter
LOGGER = logging.getLogger(__name__)
def import_file(importer, import_mode, file_name, language, shop_id, supplier_id=None, user_id=None, mapping=None):
    """Run ``importer`` over ``file_name`` and report the outcome as a TaskResult.

    Loads the shop (and the optional supplier and user), executes the
    import inside a single database transaction, and collects the log
    messages plus the identifiers of every created and updated object.
    Import errors are returned in the TaskResult error log instead of
    being raised.
    """
    shop = Shop.objects.get(pk=shop_id)
    supplier = Supplier.objects.filter(pk=supplier_id).first() if supplier_id else None
    user = get_user_model().objects.get(pk=user_id) if user_id else None

    file_importer = FileImporter(
        importer,
        ImportMode(import_mode),  # convert the raw value to the enum
        file_name,
        language,
        mapping=mapping,
        shop=shop,
        supplier=supplier,
        user=user,
    )
    try:
        file_importer.prepare()
        # Import atomically: either the whole file lands or none of it.
        with atomic():
            file_importer.import_file()

        importer_instance = file_importer.importer  # type: DataImporter
        result = {
            "other_log_messages": [str(msg) for msg in importer_instance.other_log_messages],
            "log_messages": [str(msg) for msg in importer_instance.log_messages],
            "new_objects": [
                {"model": f"{obj._meta.app_label}.{obj._meta.model_name}", "pk": obj.pk}
                for obj in importer_instance.new_objects
            ],
            "updated_objects": [
                {"model": f"{obj._meta.app_label}.{obj._meta.model_name}", "pk": obj.pk}
                for obj in importer_instance.updated_objects
            ],
        }
        return TaskResult(result=result)
    except ImporterError as error:
        return TaskResult(error_log=", ".join(error.messages))
    except Exception:
        LOGGER.exception("Failed to import the file.")
        return TaskResult(error_log=_("Unexpected error while trying to import the file."))
|
shoopio/shoop
|
shuup/importer/tasks.py
|
Python
|
agpl-3.0
| 2,821
|
# -*- coding: utf-8 -*-
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
    # Accept either a style name or an actual Style class; names are
    # resolved through the style registry.
    if not isinstance(style, basestring):
        return style
    return get_style_by_name(style)
class Formatter(object):
    """
    Converts a token stream to text.
    Options accepted:
    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.
    """
    #: Name of the formatter
    name = None
    #: Shortcuts for the formatter
    aliases = []
    #: fn match rules
    filenames = []
    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True
    def __init__(self, **options):
        # Resolve the style option to a Style class (string names are
        # looked up in the style registry).
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # 'outencoding' takes precedence over 'encoding'; empty strings
        # are normalised to None (= write unicode directly).
        self.encoding = options.get('encoding', None) or None
        self.encoding = options.get('outencoding', None) or self.encoding
        # Keep the raw options dict so subclasses can read their own keys.
        self.options = options
    def get_style_defs(self, arg=''):
        """
        Return the style definitions for the current style as a string.
        ``arg`` is an additional argument whose meaning depends on the
        formatter used. Note that ``arg`` can also be a list or tuple
        for some formatters like the html formatter.
        """
        return ''
    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.
        """
        if self.encoding:
            # wrap the outfile in a StreamWriter so unicode tokens are
            # encoded transparently (codecs.lookup()[3] is the
            # StreamWriter class of that codec).
            outfile = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, outfile)
|
markandrewj/pygments
|
pygments/formatter.py
|
Python
|
bsd-2-clause
| 2,790
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
import warnings
from django.core.paginator import Page
from django.db.models import Sum
from rest_framework.serializers import (
HyperlinkedModelSerializer, HyperlinkedIdentityField, Serializer
)
from trex.models.project import Project, Entry, Tag, ProjectUser
class UpdateDataSerializerMixin(object):
    """Mixin that lets serializer subclasses post-process their output.

    Overrides the ``data`` property to build the primitive representation
    via ``get_data()`` and pass it through ``update_data()`` before
    caching it in ``_data``.
    """
    @property
    def data(self):
        """
        Returns the serialized data on the serializer.
        """
        # Serialize lazily, exactly once; the result is cached in _data.
        if self._data is None:
            data = self.get_data()
            self.update_data(data)
            self._data = data
        return self._data
    def update_data(self, data):
        """
        Update and add additional data here
        """
    def get_data(self):
        """
        Serializes an model instance or queryset to primitive data
        """
        obj = self.object
        # Respect an explicit many flag; otherwise guess from the object
        # (iterables other than Page/dict are treated as collections).
        if self.many is not None:
            many = self.many
        else:
            many = hasattr(obj, '__iter__') and not isinstance(
                obj, (Page, dict))
            if many:
                warnings.warn('Implicit list/queryset serialization '
                              'is deprecated. Use the `many=True` flag '
                              'when instantiating the serializer.',
                              DeprecationWarning, stacklevel=2)
        if many:
            return [self.to_native(item) for item in obj]
        else:
            return self.to_native(obj)
class ProjectSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the basic fields of a Project
    """
    class Meta:
        model = Project
        fields = ("url", "id", "name", "description", "active", "created")
class ProjectDetailSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the details of a Project
    """
    # Hyperlinks to the nested list endpoints of this project.
    entries = HyperlinkedIdentityField(view_name="project-entries-list")
    tags = HyperlinkedIdentityField(view_name="project-tags-list")
    users = HyperlinkedIdentityField(view_name="project-users-list")
    class Meta:
        model = Project
        fields = ("id", "name", "description", "active", "created", "entries",
                  "tags", "users")
class EntryTagsSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the Tags of an Entry
    """
    # Compact tag representation used when tags are nested inside entries.
    class Meta:
        model = Tag
        fields = ("url", "id", "name")
class EntryProjectSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the Project of an Entry
    """
    # Minimal project representation used when nested inside an entry.
    class Meta:
        model = Project
        fields = ("url", "id", "name")
class EntryDetailSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the details of an Entry
    """
    # Embed full nested representations instead of plain hyperlinks.
    tags = EntryTagsSerializer(many=True)
    project = EntryProjectSerializer()
    class Meta:
        model = Entry
        fields = ("url", "id", "date", "duration", "description", "state",
                  "user", "created", "project", "workpackage", "tags")
class ProjectUserSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the Users of a Project
    """
    # Only the id and the user's abbreviation are exposed.
    class Meta:
        model = ProjectUser
        fields = ("id", "user_abbr")
class ProjectEntrySerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the Entries of a Project
    """
    # Nested tag and user representations instead of plain hyperlinks.
    tags = EntryTagsSerializer(many=True)
    user = ProjectUserSerializer()
    class Meta:
        model = Entry
        fields = ("url", "id", "date", "duration", "description", "state",
                  "user", "created", "workpackage", "tags")
class ProjectEntrySumsSerializer(UpdateDataSerializerMixin, Serializer):
    """
    Serializer that aggregates entry durations of a Project
    """
    class Meta:
        model = Entry
    def get_data(self):
        """Build duration sums grouped by workpackage, by tag and overall.

        ``self.object`` is expected to be an Entry queryset (it is used
        with ``values()``/``annotate()``/``aggregate()``).
        """
        data = {}
        # Sum of durations per workpackage.
        sum_values = self.object.values("workpackage").annotate(
            duration=Sum("duration")).order_by("workpackage")
        workpackage_sums = []
        for value in sum_values:
            workpackage_sums.append({"name": value["workpackage"],
                                     "duration": value["duration"]})
        data["workpackage_sums"] = workpackage_sums
        # Sum of durations per tag name.
        sum_values = self.object.values("tags__name").annotate(
            duration=Sum("duration")).order_by("tags__name")
        tags_sums = []
        for value in sum_values:
            tags_sums.append({"name": value["tags__name"],
                              "duration": value["duration"]})
        data["tag_sums"] = tags_sums
        # Overall total over all entries in the queryset.
        data["sum"] = self.object.all().aggregate(d=Sum("duration"))["d"]
        return data
class ProjectTagSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the Tags of a Project
    """
    # Tag fields shown in a project's tag list.
    class Meta:
        model = Tag
        fields = ("id", "name", "description", "created")
class TagDetailSerializer(HyperlinkedModelSerializer):
    """
    Serializer to show the details of a Tag
    """
    # Includes the owning project in addition to the tag's own fields.
    class Meta:
        model = Tag
        fields = ("id", "project", "name", "description", "created")
|
bjoernricks/trex
|
trex/serializers.py
|
Python
|
mit
| 5,009
|
import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
# Problem sizes shared by every rolling benchmark in this module.
nx = 300
long_nx = 30000
ny = 200
nt = 100
window = 20
# Random fixtures created once at import time (frac_nan injects NaN holes).
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt,))
randn_long = randn((long_nx,), frac_nan=0.1)
class Rolling:
    """ASV timing benchmarks for rolling-window operations on in-memory data."""
    def setup(self, *args, **kwargs):
        # Dataset with 2-D and 1-D variables so rolling is exercised over
        # several dimension layouts; fixtures come from module-level arrays.
        self.ds = xr.Dataset(
            {
                "var1": (("x", "y"), randn_xy),
                "var2": (("x", "t"), randn_xt),
                "var3": (("t",), randn_t),
            },
            coords={
                "x": np.arange(nx),
                "y": np.linspace(0, 1, ny),
                "t": pd.date_range("1970-01-01", periods=nt, freq="D"),
                "x_coords": ("x", np.linspace(1.1, 2.1, nx)),
            },
        )
        # Long 1-D array used to compare against pandas' rolling.
        self.da_long = xr.DataArray(
            randn_long, dims="x", coords={"x": np.arange(long_nx) * 0.1}
        )
    @parameterized(
        ["func", "center", "use_bottleneck"],
        (["mean", "count"], [True, False], [True, False]),
    )
    def time_rolling(self, func, center, use_bottleneck):
        # Rolling reduction along x, with and without bottleneck.
        with xr.set_options(use_bottleneck=use_bottleneck):
            getattr(self.ds.rolling(x=window, center=center), func)().load()
    @parameterized(
        ["func", "pandas", "use_bottleneck"],
        (["mean", "count"], [True, False], [True, False]),
    )
    def time_rolling_long(self, func, pandas, use_bottleneck):
        # Same reduction on a long 1-D array, optionally through pandas.
        if pandas:
            se = self.da_long.to_series()
            getattr(se.rolling(window=window, min_periods=window), func)()
        else:
            with xr.set_options(use_bottleneck=use_bottleneck):
                getattr(
                    self.da_long.rolling(x=window, min_periods=window), func
                )().load()
    @parameterized(
        ["window_", "min_periods", "use_bottleneck"], ([20, 40], [5, 5], [True, False])
    )
    def time_rolling_np(self, window_, min_periods, use_bottleneck):
        # reduce() with a raw numpy function instead of a named method.
        with xr.set_options(use_bottleneck=use_bottleneck):
            self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce(
                getattr(np, "nansum")
            ).load()
    @parameterized(
        ["center", "stride", "use_bottleneck"], ([True, False], [1, 1], [True, False])
    )
    def time_rolling_construct(self, center, stride, use_bottleneck):
        # construct() materialises the rolling window as a new dimension.
        with xr.set_options(use_bottleneck=use_bottleneck):
            self.ds.rolling(x=window, center=center).construct(
                "window_dim", stride=stride
            ).sum(dim="window_dim").load()
class RollingDask(Rolling):
    """Same benchmarks as Rolling, but on dask-chunked (lazy) data."""
    def setup(self, *args, **kwargs):
        requires_dask()
        super().setup(**kwargs)
        # Chunk the fixtures so every operation runs through dask.
        self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50})
        self.da_long = self.da_long.chunk({"x": 10000})
class RollingMemory:
    """Fixture base class for peak-memory benchmarks of rolling operations."""
    def setup(self, *args, **kwargs):
        # Same dataset layout as Rolling.setup, without the long 1-D array.
        self.ds = xr.Dataset(
            {
                "var1": (("x", "y"), randn_xy),
                "var2": (("x", "t"), randn_xt),
                "var3": (("t",), randn_t),
            },
            coords={
                "x": np.arange(nx),
                "y": np.linspace(0, 1, ny),
                "t": pd.date_range("1970-01-01", periods=nt, freq="D"),
                "x_coords": ("x", np.linspace(1.1, 2.1, nx)),
            },
        )
class DataArrayRollingMemory(RollingMemory):
    """Peak-memory benchmarks for DataArray.rolling reductions."""
    @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
    def peakmem_ndrolling_reduce(self, func, use_bottleneck):
        # 2-D rolling window over a single variable.
        with xr.set_options(use_bottleneck=use_bottleneck):
            roll = self.ds.var1.rolling(x=10, y=4)
            getattr(roll, func)()
    @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
    def peakmem_1drolling_reduce(self, func, use_bottleneck):
        # 1-D rolling window along the time dimension.
        with xr.set_options(use_bottleneck=use_bottleneck):
            roll = self.ds.var3.rolling(t=100)
            getattr(roll, func)()
class DatasetRollingMemory(RollingMemory):
    """Peak-memory benchmarks for whole-Dataset rolling reductions."""
    @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
    def peakmem_ndrolling_reduce(self, func, use_bottleneck):
        # 2-D rolling window over every variable of the Dataset.
        with xr.set_options(use_bottleneck=use_bottleneck):
            roll = self.ds.rolling(x=10, y=4)
            getattr(roll, func)()
    @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False]))
    def peakmem_1drolling_reduce(self, func, use_bottleneck):
        # 1-D rolling window along the time dimension.
        with xr.set_options(use_bottleneck=use_bottleneck):
            roll = self.ds.rolling(t=100)
            getattr(roll, func)()
|
pydata/xarray
|
asv_bench/benchmarks/rolling.py
|
Python
|
apache-2.0
| 4,603
|
import json
import socket
import time
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from werkzeug.wrappers import Response
app = Flask(__name__)
def timePassed(oldtime, membership):
    """Return whether the per-membership cooldown has elapsed since *oldtime*.

    The result is a dict with ``status`` (True once the cooldown elapsed)
    and ``timer`` (the cooldown length in seconds for this membership tier).
    """
    cooldowns = {'lifetime': 15, '10sec': 10}
    timer = cooldowns.get(membership, 29)
    elapsed = time.time() - oldtime
    return {'status': elapsed > timer, 'timer': timer}
def attackSend(membership, channel, message, type, which):
    """Forward one attack job, as a JSON line, to the local bot daemon on port 8080."""
    payload = json.dumps({'membership': membership, 'channel': channel, 'message': message, 'type': type, 'which': which})
    conn = socket.socket()
    conn.connect(('localhost', 8080))
    conn.send("{0}\r\n".format(payload).encode('utf-8'))
    conn.close()
# Maps auth code -> unix timestamp of that code's last attack (cooldown state).
lastattack = {}
@app.route('/')
@app.route('/<err>')
def index(err="Ready."):
    """Render the landing page with the loaded account/proxy counts and a
    status line (*err*) plus the auth code echoed back from the query string."""
    try:
        code = request.args.get('code')
    except:
        code = ''
    account_count = len(open('new_users.txt', 'r').readlines())
    proxy_count = len(open('proxies.txt', 'r').readlines())
    return render_template('index.html', accounts=account_count, proxies=proxy_count, err=err, code=code)
@app.route('/attack', methods=['POST'])
def attack():
    """Handle an attack request: authenticate the submitted code, enforce
    the per-code cooldown, and forward the job to the local bot daemon.

    Replaces the previous bare ``except:`` clauses (which hid real errors)
    with explicit defaults, and drops dead commented-out code.
    """
    postData = request.form
    # Optional selector for which account pool to use.
    which = postData.get('which', 'numbers')
    # auth.json maps codes to membership tiers: {"codes": [[code, tier], ...]}
    authorization_keys = json.loads(open('auth.json', 'r').read())
    membership = None
    for code in authorization_keys['codes']:
        if postData['auth'] == code[0]:
            membership = code[1]
    if membership is None:
        return redirect('/Invalid authorization code.?code={0}'.format(postData['auth']))
    # Channels that may never be targeted.
    blackList = ['barbleslol']
    if postData['channel'].lower() in blackList:
        return redirect('/Channel BLACKLISTED.?code={0}'.format(postData['auth']))
    oldTime = lastattack.get(postData['auth'])
    if oldTime is None:
        # First attack for this code: no cooldown to check.
        print('no old attack')
        lastattack[postData['auth']] = time.time()
        attackSend(membership, postData['channel'].lower(), postData['message'], 'start', which)
        return redirect('/Attack sent.?code={0}'.format(postData['auth']))
    hasTimePassed = timePassed(oldTime, membership)
    if hasTimePassed['status']:
        lastattack[postData['auth']] = time.time()
        attackSend(membership, postData['channel'].lower(), postData['message'], 'start', which)
        return redirect('/Attack sent.?code={0}'.format(postData['auth']))
    print('time not passed')
    return redirect('/{0} second cooldown.?code={1}'.format(hasTimePassed['timer'], postData['auth']))
|
randomrandomlol123/fewgewgewgewhg
|
webapp.py
|
Python
|
gpl-3.0
| 3,198
|
# coding: utf-8
"""
Breadth-first search (BFS) graph traversal.
"""
# TODO: rewrite
import queue
n, m = map(int, input().split())  # number of vertices and edges in the graph
adj = [[] for i in range(n)]  # adjacency lists
used = [False for i in range(n)]  # visited flag for every vertex
q = queue.Queue()  # queue of vertices pending a visit during the BFS
# read the graph, given as an edge list (vertices are 1-based on input)
for _ in range(m):
    v, w = map(int, input().split())
    v -= 1
    w -= 1
    # undirected graph: record the edge in both directions
    adj[v].append(w)
    adj[w].append(v)
def bfs(v):
    """Breadth-first traversal starting at vertex *v*.

    Prints every reachable vertex (1-based) separated by spaces.  Does
    nothing when *v* was already visited by an earlier call, so repeated
    calls cover each connected component exactly once.
    """
    if used[v]:
        return
    used[v] = True
    q.put(v)
    while not q.empty():
        v = q.get()
        print(v + 1, end=' ')
        for neighbor in adj[v]:
            if not used[neighbor]:
                used[neighbor] = True
                q.put(neighbor)
def run():
    """Start a BFS from every vertex so every connected component is printed."""
    for start in range(n):
        bfs(start)
|
vladworldss/algo
|
struct/graph/bfs.py
|
Python
|
gpl-3.0
| 1,905
|
#
# ImageView.py -- base class for the display of image files
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
import math
import logging
import threading
import sys, traceback
import time
from ginga.misc import Callback, Settings
from ginga import RGBMap, AstroImage, AutoCuts, ColorDist
from ginga import cmap, imap, trcalc, version
from ginga.canvas import coordmap
from ginga.canvas.types.layer import DrawingCanvas
from ginga.util import io_rgb
class ImageViewError(Exception):
    """Base class for errors raised by image viewers."""
    pass

class ImageViewCoordsError(ImageViewError):
    """Raised for invalid coordinate operations."""
    pass

class ImageViewNoDataError(ImageViewError):
    """Raised when an operation needs an image but none is loaded."""
    pass
class ImageViewBase(Callback.Callbacks):
"""An abstract base class for displaying images represented by
numpy data arrays.
This class attempts to do as much of the image handling using numpy
array manipulations (even color and intensity mapping) so that only
a minimal mapping to a pixel buffer is necessary in concrete subclasses
that connect to an actual rendering surface.
"""
def __init__(self, logger=None, rgbmap=None, settings=None):
"""
Constructor for an image display object.
Parameters
----------
logger: logging-module compatible logger object, or None
a logger for tracing and debugging; if None, one will be created
rgbmap: a ginga.RGBMap.RGBMapper object, or None
an RGB mapper object; if None, one will be created
settings: a ginga.Settings.SettingGroup object, or None
viewer preferences; if None, one will be created
"""
Callback.Callbacks.__init__(self)
if logger is not None:
self.logger = logger
else:
self.logger = logging.Logger('ImageViewBase')
# Create settings and set defaults
if settings is None:
settings = Settings.SettingGroup(logger=self.logger)
self.t_ = settings
# RGB mapper
if rgbmap:
self.rgbmap = rgbmap
else:
rgbmap = RGBMap.RGBMapper(self.logger)
self.rgbmap = rgbmap
# for debugging
self.name = str(self)
# for color mapping
self.t_.addDefaults(color_map='gray', intensity_map='ramp',
color_algorithm='linear',
color_hashsize=65535)
for name in ('color_map', 'intensity_map', 'color_algorithm',
'color_hashsize'):
self.t_.getSetting(name).add_callback('set', self.cmap_changed_cb)
# Initialize RGBMap
cmap_name = self.t_.get('color_map', 'gray')
try:
cm = cmap.get_cmap(cmap_name)
except KeyError:
cm = cmap.get_cmap('gray')
rgbmap.set_cmap(cm)
imap_name = self.t_.get('intensity_map', 'ramp')
try:
im = imap.get_imap(imap_name)
except KeyError:
im = imap.get_imap('ramp')
rgbmap.set_imap(im)
hash_size = self.t_.get('color_hashsize', 65535)
rgbmap.set_hash_size(hash_size)
hash_alg = self.t_.get('color_algorithm', 'linear')
rgbmap.set_hash_algorithm(hash_alg)
rgbmap.add_callback('changed', self.rgbmap_cb)
# for scale
self.t_.addDefaults(scale=(1.0, 1.0))
for name in ['scale']:
self.t_.getSetting(name).add_callback('set', self.scale_cb)
# for pan
self.t_.addDefaults(pan=(1.0, 1.0), pan_coord='data')
for name in ['pan', ]: #'pan_coord'
self.t_.getSetting(name).add_callback('set', self.pan_cb)
# for cut levels
self.t_.addDefaults(cuts=(0.0, 0.0))
for name in ['cuts']:
self.t_.getSetting(name).add_callback('set', self.cut_levels_cb)
# for auto cut levels
self.autocuts_options = ('on', 'override', 'once', 'off')
self.t_.addDefaults(autocuts='override', autocut_method='zscale',
autocut_params={})
for name in ('autocut_method', 'autocut_params'):
self.t_.getSetting(name).add_callback('set', self.auto_levels_cb)
# for zooming
self.t_.addDefaults(zoomlevel=1.0, zoom_algorithm='step',
scale_x_base=1.0, scale_y_base=1.0,
interpolation='basic',
zoom_rate=math.sqrt(2.0))
for name in ('zoom_rate', 'zoom_algorithm',
'scale_x_base', 'scale_y_base'):
self.t_.getSetting(name).add_callback('set', self.zoomalg_change_cb)
self.t_.getSetting('interpolation').add_callback('set', self.interpolation_change_cb)
# max/min scaling
self.t_.addDefaults(scale_max=10000.0, scale_min=0.00001)
# autozoom options
self.autozoom_options = ('on', 'override', 'once', 'off')
self.t_.addDefaults(autozoom='on')
# image overlays
# TO BE DEPRECATED
self.t_.addDefaults(image_overlays=True)
self.t_.getSetting('image_overlays').add_callback('set', self.overlays_change_cb)
# for panning
self.autocenter_options = ('on', 'override', 'once', 'off')
self.t_.addDefaults(autocenter='on')
# for transforms
self.t_.addDefaults(flip_x=False, flip_y=False, swap_xy=False)
for name in ('flip_x', 'flip_y', 'swap_xy'):
self.t_.getSetting(name).add_callback('set', self.transform_cb)
# desired rotation angle
self.t_.addDefaults(rot_deg=0.0)
self.t_.getSetting('rot_deg').add_callback('set', self.rotation_change_cb)
# misc
self.t_.addDefaults(auto_orient=False,
defer_redraw=True, defer_lagtime=0.025)
# embedded image "profiles"
self.t_.addDefaults(profile_use_scale=False, profile_use_pan=False,
profile_use_cuts=False, profile_use_transform=False,
profile_use_rotation=False)
# ICC profile support
d = dict(icc_output_profile=None, icc_output_intent='perceptual',
icc_proof_profile=None, icc_proof_intent='perceptual',
icc_black_point_compensation=False)
self.t_.addDefaults(**d)
for key in d:
# Note: transform_cb will redraw enough to pick up ICC profile change
self.t_.getSetting(key).add_callback('set', self.transform_cb)
# Object that calculates auto cut levels
name = self.t_.get('autocut_method', 'zscale')
klass = AutoCuts.get_autocuts(name)
self.autocuts = klass(self.logger)
# PRIVATE IMPLEMENTATION STATE
# image window width and height (see set_window_dimensions())
self._imgwin_wd = 0
self._imgwin_ht = 0
self._imgwin_set = False
# desired size
self._desired_size = (300, 300)
# center (and reference) pixel in the screen image (in pixel coords)
self._ctr_x = 1
self._ctr_y = 1
# data indexes at the reference pixel (in data coords)
self._org_x = 0
self._org_y = 0
# offset from pan position (at center) in this array
self._org_xoff = 0
self._org_yoff = 0
# pan position
self._pan_x = 0.0
self._pan_y = 0.0
self.data_off = 0.5
# Origin in the data array of what is currently displayed (LL, UR)
self._org_x1 = 0
self._org_y1 = 0
self._org_x2 = 0
self._org_y2 = 0
# offsets in the screen image for drawing (in screen coords)
self._dst_x = 0
self._dst_y = 0
self._invertY = True
self._originUpper = True
# offsets in the screen image (in data coords)
self._off_x = 0
self._off_y = 0
# desired scale factors
self._scale_x = 1.0
self._scale_y = 1.0
# actual scale factors produced from desired ones
self._org_scale_x = 1.0
self._org_scale_y = 1.0
self._rgbarr = None
self._rgbarr2 = None
self._rgbobj = None
# optimization of redrawing
self.defer_redraw = self.t_.get('defer_redraw', True)
self.defer_lagtime = self.t_.get('defer_lagtime', 0.025)
self.time_last_redraw = time.time()
self._defer_whence = 0
self._defer_lock = threading.RLock()
self._defer_flag = False
self._hold_redraw_cnt = 0
self.suppress_redraw = SuppressRedraw(self)
self.img_bg = (0.2, 0.2, 0.2)
self.img_fg = (1.0, 1.0, 1.0)
self.orientMap = {
# tag: (flip_x, flip_y, swap_xy)
1: (False, True, False),
2: (True, True, False),
3: (True, False, False),
4: (False, False, False),
5: (True, False, True),
6: (True, True, True),
7: (False, True, True),
8: (False, False, True),
}
# our canvas
self.canvas = DrawingCanvas()
self.canvas.initialize(None, self, self.logger)
self.canvas.add_callback('modified', self.canvas_changed_cb)
self.canvas.set_surface(self)
self.canvas.ui_setActive(True)
# private canvas for drawing
self.private_canvas = self.canvas
# handle to image object on the image canvas
self._imgobj = None
self._canvas_img_tag = '__image'
self.coordmap = {
'canvas': coordmap.CanvasMapper(self),
'data': coordmap.DataMapper(self),
#'offset': coordmap.OffsetMapper(self),
'wcs': coordmap.WCSMapper(self),
}
# For callbacks
for name in ('transform', 'image-set', 'image-unset', 'configure',
'redraw', ):
self.enable_callback(name)
def set_window_size(self, width, height):
    """Report the size of the window to display the image.

    Parameters
    ----------
    width: int
        the width of the window in pixels
    height: int
        the height of the window in pixels

    Notes
    -----
    This is called by the subclass with width and height as soon as
    the actual dimensions of the allocated window are known.
    (The stale ``redraw`` parameter previously documented here does
    not exist; a full redraw is always triggered.)

    Callbacks
    ---------
    Will call any callbacks registered for the 'configure' event.
    Callbacks should have a method signature of
    (viewer, width, height, ...)
    """
    self._imgwin_wd = width
    self._imgwin_ht = height
    # the reference pixel is the window center
    self._ctr_x = width // 2
    self._ctr_y = height // 2
    self.logger.info("widget resized to %dx%d" % (width, height))
    self.make_callback('configure', width, height)
    # whence=0 forces a full recomputation of the rendered image
    self.redraw(whence=0)
def configure(self, width, height):
    # Called when the real window geometry is known: mark the window as
    # realized and propagate the size.
    self._imgwin_set = True
    self.set_window_size(width, height)

def set_desired_size(self, width, height):
    # Record the preferred size; apply it immediately only while the real
    # window has not yet been realized.
    self._desired_size = (width, height)
    if not self._imgwin_set:
        self.set_window_size(width, height)

def get_desired_size(self):
    """Return the (width, height) recorded by set_desired_size()."""
    return self._desired_size

def get_window_size(self):
    """
    Returns the window size in the underlying implementation as a tuple
    of (width, height).
    """
    ## if not self._imgwin_set:
    ##     raise ImageViewError("Dimensions of actual window are not yet determined")
    return (self._imgwin_wd, self._imgwin_ht)
def get_dims(self, data):
    """
    Return (width, height) taken from the first two axes of numpy array
    *data*.  Any additional dimensions are ignored.
    """
    wd, ht = data.shape[1], data.shape[0]
    return (wd, ht)
def get_data_size(self):
    """
    Return the (width, height) of the image currently being displayed.

    Raises
    ------
    ImageViewNoDataError
        if no image is currently loaded in the viewer
    """
    image = self.get_image()
    if image is None:
        raise ImageViewNoDataError("No data found")
    return image.get_size()
def get_settings(self):
    """
    Returns the Settings object used by this instance.
    """
    return self.t_

def get_logger(self):
    """
    Returns the logger object used by this instance.
    """
    return self.logger

def get_canvas(self):
    """Return the canvas object holding this viewer's drawable objects."""
    return self.canvas
def set_canvas(self, canvas, private_canvas=None):
self.canvas = canvas
canvas.initialize(None, self, self.logger)
canvas.add_callback('modified', self.canvas_changed_cb)
canvas.set_surface(self)
canvas.ui_setActive(True)
self._imgobj = None
# private canvas set?
if not (private_canvas is None):
self.private_canvas = private_canvas
if private_canvas != canvas:
private_canvas.set_surface(self)
private_canvas.ui_setActive(True)
private_canvas.add_callback('modified', self.canvas_changed_cb)
# sanity check that we have a private canvas, and if not,
# set it to the "advertised" canvas
if self.private_canvas is None:
self.private_canvas = canvas
# make sure private canvas has our non-private one added
if (self.private_canvas != self.canvas) and (
not self.private_canvas.has_object(canvas)):
self.private_canvas.add(canvas)
def set_color_map(self, cmap_name):
"""Sets the color map.
Parameters
----------
cmap_name: string
the name of a color map
"""
cm = cmap.get_cmap(cmap_name)
self.set_cmap(cm)
def set_intensity_map(self, imap_name):
"""Sets the intensity map.
Parameters
----------
imap_name: string
the name of an intensity map
"""
im = imap.get_imap(imap_name)
self.set_imap(im)
def set_color_algorithm(self, calg_name, **kwdargs):
    """Sets the color distribution algorithm.

    Parameters
    ----------
    calg_name: string
        the name of a color distribution algorithm
    kwdargs:
        extra keyword arguments forwarded to the algorithm's constructor
    """
    hashsize = self.rgbmap.get_hash_size()
    dist_class = ColorDist.get_dist(calg_name)
    self.set_calg(dist_class(hashsize, **kwdargs))
def get_color_algorithms(self):
    """Return the list of known color-distribution algorithm names."""
    return ColorDist.get_dist_names()

def set_cmap(self, cm):
    # Delegate color map changes to the RGB mapper.
    self.rgbmap.set_cmap(cm)

def set_imap(self, im):
    # Delegate intensity map changes to the RGB mapper.
    self.rgbmap.set_imap(im)

def set_calg(self, dist):
    # Delegate color-distribution changes to the RGB mapper.
    self.rgbmap.set_dist(dist)

def shift_cmap(self, pct):
    # Shift the color map by pct.
    self.rgbmap.shift(pct)

def scale_and_shift_cmap(self, scale_pct, shift_pct):
    # Stretch and shift the color map in one operation.
    self.rgbmap.scale_and_shift(scale_pct, shift_pct)

def rgbmap_cb(self, rgbmap):
    # Callback from the RGB mapper: color mapping changed, so re-render.
    # whence=2 means "only color mapping changed" (see get_rgb_object).
    self.logger.debug("RGB map has changed.")
    self.redraw(whence=2)
def cmap_changed_cb(self, setting, value):
    # This method is a callback that is invoked when the color settings
    # have changed in some way.
    self.logger.debug("Color settings have changed.")
    # Update our RGBMapper with any changes.  callback=False on all but
    # the final setter so the mapper fires a single 'changed' event (and
    # thus a single redraw) for the whole batch of updates.
    cmap_name = self.t_.get('color_map', "gray")
    cm = cmap.get_cmap(cmap_name)
    self.rgbmap.set_cmap(cm, callback=False)
    imap_name = self.t_.get('intensity_map', "ramp")
    im = imap.get_imap(imap_name)
    self.rgbmap.set_imap(im, callback=False)
    hash_size = self.t_.get('color_hashsize', 65535)
    self.rgbmap.set_hash_size(hash_size, callback=False)
    hash_alg = self.t_.get('color_algorithm', "linear")
    self.rgbmap.set_hash_algorithm(hash_alg, callback=True)
def get_rgbmap(self):
    """
    Returns the RGBMapper object used by this instance.
    """
    return self.rgbmap

def set_rgbmap(self, rgbmap):
    """
    Set the RGBMapper object used by this instance. The RGBMapper
    controls how the values in the image are mapped to color.
    See RGBMap module.
    """
    self.rgbmap = rgbmap
    # track future changes to the new mapper, then re-render colors
    rgbmap.add_callback('changed', self.rgbmap_cb)
    self.redraw(whence=2)
def get_image(self):
    """
    Returns the image currently being displayed. The object returned
    will be a subclass of BaseImage.
    """
    if not (self._imgobj is None):
        # quick optimization: use the cached canvas image item
        return self._imgobj.get_image()
    canvas_img = self.get_canvas_image()
    return canvas_img.get_image()

def get_canvas_image(self):
    # Return the canvas item that holds the displayed image, creating one
    # on first use (it is cached in self._imgobj but not yet added to the
    # canvas here; set_image() handles the actual add).
    if not (self._imgobj is None):
        return self._imgobj
    try:
        # See if there is an image on the canvas
        self._imgobj = self.canvas.get_object_by_tag(self._canvas_img_tag)
    except KeyError:
        # add a normalized image item to this canvas if we don't
        # have one already--then just keep reusing it
        NormImage = self.canvas.getDrawClass('normimage')
        interp = self.t_.get('interpolation', 'basic')
        self._imgobj = NormImage(0, 0, None, alpha=1.0,
                                 interpolation=interp)
    return self._imgobj
def set_image(self, image, add_to_canvas=True,
raise_initialize_errors=True):
"""
Sets an image to be displayed.
image should be a subclass of BaseImage.
If there is no error, this method will invoke the 'image-set'
callback.
If raise_initialize_errors is passed as False, then errors
relating to orienting/zooming/centering/autolevels will not
raise an exception, although an error message and traceback
will appear in the log.
"""
with self.suppress_redraw:
canvas_img = self.get_canvas_image()
old_image = canvas_img.get_image()
canvas_img.set_image(image)
if add_to_canvas:
try:
self.canvas.get_object_by_tag(self._canvas_img_tag)
except KeyError:
tag = self.canvas.add(canvas_img,
tag=self._canvas_img_tag)
#print("adding image to canvas %s" % self.canvas)
# move image to bottom of layers
self.canvas.lowerObject(canvas_img)
profile = image.get('profile', None)
try:
# initialize transform
if (profile is not None) and (self.t_['profile_use_transform']) and \
profile.has_key('flip_x'):
flip_x, flip_y = profile['flip_x'], profile['flip_y']
swap_xy = profile['swap_xy']
self.transform(flip_x, flip_y, swap_xy)
else:
self.logger.debug("auto orient (%s)" % (self.t_['auto_orient']))
if self.t_['auto_orient']:
self.auto_orient()
# initialize scale
if (profile is not None) and (self.t_['profile_use_scale']) and \
profile.has_key('scale_x'):
scale_x, scale_y = profile['scale_x'], profile['scale_y']
self.logger.debug("restoring scale to (%f,%f)" % (
scale_x, scale_y))
self.scale_to(scale_x, scale_y, no_reset=True)
else:
self.logger.debug("auto zoom (%s)" % (self.t_['autozoom']))
if self.t_['autozoom'] != 'off':
self.zoom_fit(no_reset=True)
# initialize pan position
if (profile is not None) and (self.t_['profile_use_pan']) and \
profile.has_key('pan_x'):
pan_x, pan_y = profile['pan_x'], profile['pan_y']
self.logger.debug("restoring pan position to (%f,%f)" % (
pan_x, pan_y))
self.set_pan(pan_x, pan_y, no_reset=True)
else:
# NOTE: False a possible value from historical use
self.logger.debug("auto center (%s)" % (self.t_['autocenter']))
if not self.t_['autocenter'] in ('off', False):
self.center_image(no_reset=True)
# initialize rotation
if (profile is not None) and (self.t_['profile_use_rotation']) and \
profile.has_key('rot_deg'):
rot_deg = profile['rot_deg']
self.rotate(rot_deg)
# initialize cuts
if (profile is not None) and (self.t_['profile_use_cuts']) and \
profile.has_key('cutlo'):
loval, hival = profile['cutlo'], profile['cuthi']
self.cut_levels(loval, hival, no_reset=True)
else:
self.logger.debug("auto cuts (%s)" % (self.t_['autocuts']))
if self.t_['autocuts'] != 'off':
self.auto_levels()
except Exception as e:
self.logger.error("Failed to initialize image: %s" % (str(e)))
try:
# log traceback, if possible
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
if raise_initialize_errors:
raise e
self.canvas.update_canvas(whence=0)
# update our display if the image changes underneath us
image.add_callback('modified', self._image_updated)
# out with the old, in with the new...
self.make_callback('image-unset', old_image)
self.make_callback('image-set', image)
def _image_updated(self, image):
with self.suppress_redraw:
canvas_img = self.get_canvas_image()
#canvas_img.set_image(image)
canvas_img.reset_optimize()
# Per issue #111, zoom and pan and cuts probably should
# not change if the image is _modified_, or it should be
# optional--these settings are only for _new_ images
# UPDATE: don't zoom/pan (assuming image size, etc. hasn't
# changed), but *do* apply cuts
try:
self.logger.debug("image data updated")
## if self.t_['auto_orient']:
## self.auto_orient()
## if self.t_['autozoom'] != 'off':
## self.zoom_fit(no_reset=True)
## if not self.t_['autocenter'] in ('off', False):
## self.center_image()
if self.t_['autocuts'] != 'off':
self.auto_levels()
except Exception as e:
self.logger.error("Failed to initialize image: %s" % (str(e)))
try:
# log traceback, if possible
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
self.canvas.update_canvas(whence=0)
def set_data(self, data, metadata=None):
    """
    Sets an image to be displayed by providing raw data.

    This is a convenience method for first constructing an image
    with AstroImage and then calling set_image().

    Parameters
    ----------
    data: numpy array
        at least a 2D numpy array
    metadata: dict-like, or None
        optional image metadata
    """
    # (removed an unused local that captured data.shape)
    image = AstroImage.AstroImage(data, metadata=metadata,
                                  logger=self.logger)
    self.set_image(image)
def clear(self):
    """
    Clear the displayed image.
    """
    # Drop our cached handle first so get_canvas_image() recreates it.
    self._imgobj = None
    try:
        self.canvas.delete_object_by_tag(self._canvas_img_tag)
    except KeyError:
        # no image item was on the canvas; nothing to remove
        pass
def save_profile(self, **params):
    """Persist viewer settings (*params*) into the current image's profile."""
    image = self.get_image()
    if image is None:
        return
    profile = image.get('profile', None)
    if profile is None:
        # image has no profile yet--create one and attach it
        profile = Settings.SettingGroup()
        image.set(profile=profile)
    self.logger.debug("saving to image profile: params=%s" % (str(params)))
    profile.set(**params)
## def apply_profile(self, image, profile, redraw=False):
## self.logger.info("applying existing profile found in image")
## if profile.has_key('scale_x'):
## scale_x, scale_y = profile['scale_x'], profile['scale_y']
## self.scale_to(scale_x, scale_y, no_reset=True, redraw=False)
## if profile.has_key('pan_x'):
## pan_x, pan_y = profile['pan_x'], profile['pan_y']
## self.set_pan(pan_x, pan_y, no_reset=True, redraw=False)
## if profile.has_key('cutlo'):
## loval, hival = profile['cutlo'], profile['cuthi']
## self.cut_levels(loval, hival, no_reset=True, redraw=False)
## if redraw:
## self.redraw(whence=0)
def copy_to_dst(self, target):
    """
    Extract our image and call set_image() on the target with it.
    """
    target.set_image(self.get_image())
def redraw(self, whence=0):
    """Request a redraw, possibly deferring and coalescing it.

    whence has the same meaning as in get_rgb_object(); lower values
    force more recomputation (0 = recompute everything).
    """
    if not self.defer_redraw:
        # Deferred drawing disabled: draw synchronously unless redraws
        # are currently being held (see _hold_redraw_cnt).
        if self._hold_redraw_cnt == 0:
            self.redraw_now(whence=whence)
        return
    with self._defer_lock:
        # coalesce with any pending request: keep the lowest (most
        # invasive) whence asked for so far
        whence = min(self._defer_whence, whence)
        # If there is no redraw scheduled:
        if not self._defer_flag:
            elapsed = time.time() - self.time_last_redraw
            # If more time than defer_lagtime has passed since the
            # last redraw then just do the redraw immediately
            if elapsed > self.defer_lagtime:
                if self._hold_redraw_cnt > 0:
                    #self._defer_flag = True
                    self._defer_whence = whence
                    return
                # reset the pending level to the cheapest before drawing
                self._defer_whence = 3
                self.logger.debug("lagtime expired--forced redraw")
                self.redraw_now(whence=whence)
                return
            # Indicate that a redraw is necessary and record whence
            self._defer_flag = True
            self._defer_whence = whence
            # schedule a redraw by the end of the defer_lagtime
            secs = self.defer_lagtime - elapsed
            self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (
                whence, secs))
            self.reschedule_redraw(secs)
        else:
            # A redraw is already scheduled. Just record whence.
            self._defer_whence = whence
            self.logger.debug("update whence=%.2f" % (whence))
def canvas_changed_cb(self, canvas, whence):
self.logger.debug("root canvas changed, whence=%d" % (whence))
self.redraw(whence=whence)
def delayed_redraw(self):
# This is the optomized redraw method
with self._defer_lock:
# pick up the lowest necessary level of redrawing
whence = self._defer_whence
self._defer_whence = 3
flag = self._defer_flag
self._defer_flag = False
if flag:
# If a redraw was scheduled, do it now
self.redraw_now(whence=whence)
def set_redraw_lag(self, lag_sec):
self.defer_redraw = (lag_sec > 0.0)
if self.defer_redraw:
self.defer_lagtime = lag_sec
def redraw_now(self, whence=0):
    """
    Redraw the displayed image immediately.

    For the meaning of whence, see get_rgb_object().
    Errors are logged (with traceback, when available) rather than
    propagated, since this typically runs from GUI event handlers.
    """
    try:
        time_start = time.time()
        self.redraw_data(whence=whence)
        # finally update the window drawable from the offscreen surface
        self.update_image()
        time_done = time.time()
        time_delta = time_start - self.time_last_redraw
        time_elapsed = time_done - time_start
        self.time_last_redraw = time_done
        self.logger.debug("widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec" % (
            self.name, whence, time_delta, time_elapsed))
    except Exception as e:
        self.logger.error("Error redrawing image: %s" % (str(e)))
        try:
            # log traceback, if possible (do not shadow the builtin
            # 'type' as the original code did)
            _, _, tb = sys.exc_info()
            tb_str = "".join(traceback.format_tb(tb))
            self.logger.error("Traceback:\n%s" % (tb_str))
        except Exception:
            self.logger.error("Traceback information unavailable.")
def redraw_data(self, whence=0):
"""
Do not call this method unless you are implementing a subclass.
"""
if not self._imgwin_set:
# window has not been realized yet
return
rgbobj = self.get_rgb_object(whence=whence)
self.render_image(rgbobj, self._dst_x, self._dst_y)
self.private_canvas.draw(self)
# TODO: see if we can deprecate this fake callback
if whence <= 0:
self.make_callback('redraw')
def getwin_array(self, order='RGB', alpha=1.0):
order = order.upper()
depth = len(order)
# Prepare data array for rendering
data = self._rgbobj.get_array(order)
# NOTE [A]
height, width, depth = data.shape
imgwin_wd, imgwin_ht = self.get_window_size()
# create RGBA array for output
outarr = numpy.zeros((imgwin_ht, imgwin_wd, depth), dtype='uint8')
# fill image array with the background color
r, g, b = self.img_bg
bgval = dict(A=int(255*alpha), R=int(255*r), G=int(255*g), B=int(255*b))
for i in range(len(order)):
outarr[:, :, i] = bgval[order[i]]
# overlay our data
trcalc.overlay_image(outarr, self._dst_x, self._dst_y,
data, flipy=False, fill=False, copy=False)
return outarr
def getwin_buffer(self, order='RGB'):
    """Return the rendered window contents as raw bytes in C order.

    Parameters
    ----------
    order: string
        color channel order, e.g. 'RGB' or 'RGBA'
    """
    outarr = self.getwin_array(order=order)
    # ndarray.tobytes() replaces tostring(), which was deprecated and
    # later removed from numpy; the output bytes are identical.
    return outarr.tobytes(order='C')
def get_datarect(self):
    """
    Returns the approximate bounding box of the displayed image in
    data coordinates (x1, y1, x2, y2).
    """
    return (self._org_x1, self._org_y1, self._org_x2, self._org_y2)
def get_rgb_object(self, whence=0):
"""
Create and return an RGB slices object representing the data
that should be rendered at this zoom level and pan settings.
`whence` is an optimization flag that reduces the time to create
the object by only recalculating what is necessary:
0 = new image, pan/scale has changed, or rotation/transform
has changed--recalculate everything
1 = cut levels or similar has changed
2 = color mapping has changed
3 = graphical overlays have changed
"""
time_start = time.time()
win_wd, win_ht = self.get_window_size()
order = self.get_rgb_order()
if (whence <= 0.0) or (self._rgbarr is None):
# calculate dimensions of window RGB backing image
wd, ht = self._calc_bg_dimensions(self._scale_x, self._scale_y,
self._pan_x, self._pan_y,
win_wd, win_ht)
# create backing image
depth = len(order)
rgba = numpy.zeros((ht, wd, depth), dtype=numpy.uint8)
self._rgbarr = rgba
if (whence <= 2.0) or (self._rgbarr2 is None):
# Apply any RGB image overlays
self._rgbarr2 = numpy.copy(self._rgbarr)
self.overlay_images(self.canvas, self._rgbarr2, whence=whence)
if (whence <= 2.5) or (self._rgbobj is None):
rotimg = self._rgbarr2
# Apply any viewing transformations or rotations
# if not applied earlier
rotimg = self.apply_transforms(rotimg,
self.t_['rot_deg'])
rotimg = numpy.ascontiguousarray(rotimg)
self._rgbobj = RGBMap.RGBPlanes(rotimg, order)
# convert to output ICC profile, if one is specified
output_profile = self.t_.get('icc_output_profile', None)
if not (output_profile is None):
self.convert_via_profile(self._rgbobj, 'working',
output_profile)
time_end = time.time()
self.logger.debug("times: total=%.4f" % (
(time_end - time_start)))
return self._rgbobj
def _calc_bg_dimensions(self, scale_x, scale_y,
pan_x, pan_y, win_wd, win_ht):
coord = self.t_.get('pan_coord', 'data')
if coord == 'wcs':
# <-- pan_x, pan_y are in WCS
image = self.get_image()
if image is None:
# TODO:
pan_x, pan_y = 0.0, 0.0
else:
pan_x, pan_y = image.radectopix(pan_x, pan_y)
# Sanity check on the scale
sx = float(win_wd) / scale_x
sy = float(win_ht) / scale_y
if (sx < 1.0) or (sy < 1.0):
new_scale_x = scale_x * sx
new_scale_y = scale_y * sy
self.logger.warn("scale adjusted downward X (%.4f -> %.4f), Y (%.4f -> %.4f)" % (
scale_x, new_scale_x, scale_y, new_scale_y))
scale_x, scale_y = new_scale_x, new_scale_y
# It is necessary to store these so that the get_data_xy()
# (below) calculations can proceed
self._org_x, self._org_y = pan_x - self.data_off, pan_y - self.data_off
self._org_scale_x, self._org_scale_y = scale_x, scale_y
# calc minimum size of pixel image we will generate
# necessary to fit the window in the desired size
# get the data points in the four corners
xul, yul = self.get_data_xy(0, 0, center=True)
xur, yur = self.get_data_xy(win_wd, 0, center=True)
xlr, ylr = self.get_data_xy(win_wd, win_ht, center=True)
xll, yll = self.get_data_xy(0, win_ht, center=True)
# determine bounding box
a1 = min(xul, xur, xlr, xll)
b1 = min(yul, yur, ylr, yll)
a2 = max(xul, xur, xlr, xll)
b2 = max(yul, yur, ylr, yll)
# constrain to integer indexes
x1, y1, x2, y2 = int(a1), int(b1), int(round(a2)), int(round(b2))
x1 = max(0, x1)
y1 = max(0, y1)
self.logger.debug("approx area covered is %dx%d to %dx%d" % (
x1, y1, x2, y2))
self._org_x1 = x1
self._org_y1 = y1
self._org_x2 = x2
self._org_y2 = y2
# Make a square from the scaled cutout, with room to rotate
slop = 20
side = int(math.sqrt(win_wd**2 + win_ht**2) + slop)
wd = ht = side
# Find center of new array
ncx, ncy = wd // 2, ht // 2
self._org_xoff, self._org_yoff = ncx, ncy
return (wd, ht)
def apply_transforms(self, data, rot_deg):
start_time = time.time()
wd, ht = self.get_dims(data)
xoff, yoff = self._org_xoff, self._org_yoff
# Do transforms as necessary
flip_x, flip_y = self.t_['flip_x'], self.t_['flip_y']
swap_xy = self.t_['swap_xy']
data = trcalc.transform(data, flip_x=flip_x, flip_y=flip_y,
swap_xy=swap_xy)
if flip_y:
yoff = ht - yoff
if flip_x:
xoff = wd - xoff
if swap_xy:
xoff, yoff = yoff, xoff
split_time = time.time()
self.logger.debug("reshape time %.3f sec" % (
split_time - start_time))
# dimensions may have changed in a swap axes
wd, ht = self.get_dims(data)
# Rotate the image as necessary
if rot_deg != 0:
# TODO: this is the slowest part of the rendering
# need to find a way to speed it up!
data = trcalc.rotate_clip(data, -rot_deg, out=data)
split2_time = time.time()
# apply other transforms
if self._invertY:
# Flip Y for natural natural Y-axis inversion between FITS coords
# and screen coords
data = numpy.flipud(data)
self.logger.debug("rotate time %.3f sec, total reshape %.3f sec" % (
split2_time - split_time, split2_time - start_time))
ctr_x, ctr_y = self._ctr_x, self._ctr_y
dst_x, dst_y = ctr_x - xoff, ctr_y - (ht - yoff)
self._dst_x, self._dst_y = dst_x, dst_y
self.logger.debug("ctr=%d,%d off=%d,%d dst=%d,%d cutout=%dx%d" % (
ctr_x, ctr_y, xoff, yoff, dst_x, dst_y, wd, ht))
return data
def overlay_images(self, canvas, data, whence=0.0):
    """Let objects on *canvas* paint themselves into *data*, recursively."""
    # only compound-like objects (ones with children) are walked
    if not hasattr(canvas, 'objects'):
        return
    for member in canvas.get_objects():
        if hasattr(member, 'draw_image'):
            member.draw_image(self, data, whence=whence)
        elif member.is_compound() and (member != canvas):
            # descend into nested compound objects
            self.overlay_images(member, data, whence=whence)
    def convert_via_profile(self, rgbobj, inprof_name, outprof_name):
        """Convert the RGB data in `rgbobj` from ICC profile
        `inprof_name` to `outprof_name`, in place, honoring the ICC
        options stored in our settings.
        """
        # get rest of necessary conversion parameters
        to_intent = self.t_.get('icc_output_intent', 'perceptual')
        proofprof_name = self.t_.get('icc_proof_profile', None)
        proof_intent = self.t_.get('icc_proof_intent', 'perceptual')
        use_black_pt = self.t_.get('icc_black_point_compensation', False)
        self.logger.info("Attempting conversion from '%s' to '%s' profile" % (
            inprof_name, outprof_name))
        inp = rgbobj.get_array('RGB')
        arr = io_rgb.convert_profile_fromto(inp, inprof_name, outprof_name,
                                            to_intent=to_intent,
                                            proof_name=proofprof_name,
                                            proof_intent=proof_intent,
                                            use_black_pt=use_black_pt)
        # write the converted channels back into rgbobj's backing array
        out = rgbobj.rgbarr
        ri, gi, bi = rgbobj.get_order_indexes('RGB')
        out[..., ri] = arr[..., 0]
        out[..., gi] = arr[..., 1]
        out[..., bi] = arr[..., 2]
def get_data_xy(self, win_x, win_y, center=True):
"""Returns the closest x, y coordinates in the data array to the
x, y coordinates reported on the window (win_x, win_y).
If center==True, then the coordinates are mapped such that the
pixel is centered on the square when the image is zoomed in past
1X. This is the specification of the FITS image standard,
that the pixel is centered on the integer row/column.
This function can take numpy arrays for win_x and win_y.
"""
# First, translate window coordinates onto pixel image
off_x, off_y = self.window_to_offset(win_x, win_y)
# Reverse scaling
off_x = off_x * (1.0 / self._org_scale_x)
off_y = off_y * (1.0 / self._org_scale_y)
# Add data index at (_ctr_x, _ctr_y) to offset
data_x = self._org_x + off_x
data_y = self._org_y + off_y
if center:
data_x += self.data_off
data_y += self.data_off
return (data_x, data_y)
def get_canvas_xy(self, data_x, data_y, center=True):
"""Returns the closest x, y coordinates in the graphics space to the
x, y coordinates in the data. data_x and data_y can be integer or
floating point values.
If center==True, then the coordinates are mapped such that the
integer pixel begins in the center of the square when the image
is zoomed in past 1X. This is the specification of the FITS image
standard, that the pixel is centered on the integer row/column.
This function can take numpy arrays for data_x and data_y.
"""
if center:
data_x -= self.data_off
data_y -= self.data_off
# subtract data indexes at center reference pixel
off_x = data_x - self._org_x
off_y = data_y - self._org_y
# scale according to current settings
off_x *= self._org_scale_x
off_y *= self._org_scale_y
win_x, win_y = self.offset_to_window(off_x, off_y)
return (win_x, win_y)
def offset_to_window(self, off_x, off_y, asint=True):
"""
This method can take numpy arrays for off_x and off_y.
"""
if self.t_['flip_x']:
off_x = - off_x
if self.t_['flip_y']:
off_y = - off_y
if self.t_['swap_xy']:
off_x, off_y = off_y, off_x
if self.t_['rot_deg'] != 0:
off_x, off_y = trcalc.rotate_pt(off_x, off_y,
self.t_['rot_deg'])
# add center pixel to convert from X/Y coordinate space to
# canvas graphics space
win_x = off_x + self._ctr_x
if self._originUpper:
win_y = self._ctr_y - off_y
else:
win_y = off_y + self._ctr_y
# round to pixel units
if asint:
win_x = numpy.rint(win_x).astype(numpy.int)
win_y = numpy.rint(win_y).astype(numpy.int)
return (win_x, win_y)
def window_to_offset(self, win_x, win_y):
"""
This method can take numpy arrays for win_x and win_y.
"""
# make relative to center pixel to convert from canvas
# graphics space to standard X/Y coordinate space
off_x = win_x - self._ctr_x
if self._originUpper:
off_y = self._ctr_y - win_y
else:
off_y = win_y - self._ctr_y
if self.t_['rot_deg'] != 0:
off_x, off_y = trcalc.rotate_pt(off_x, off_y,
-self.t_['rot_deg'])
if self.t_['swap_xy']:
off_x, off_y = off_y, off_x
if self.t_['flip_y']:
off_y = - off_y
if self.t_['flip_x']:
off_x = - off_x
return (off_x, off_y)
def canvascoords(self, data_x, data_y, center=True):
# data->canvas space coordinate conversion
x, y = self.get_canvas_xy(data_x, data_y, center=center)
return (x, y)
def get_data_pct(self, xpct, ypct):
width, height = self.get_data_size()
x = int(float(xpct) * (width-1))
y = int(float(ypct) * (height-1))
return (x, y)
def get_pan_rect(self):
"""Return the coordinates in the actual data corresponding to the
area shown in the display for the current zoom level and pan.
Returns ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) lower-left to
lower-right.
"""
points = []
wd, ht = self.get_window_size()
for x, y in ((0, 0), (wd-1, 0), (wd-1, ht-1), (0, ht-1)):
c, d = self.get_data_xy(x, y)
points.append((c, d))
return points
def get_data(self, data_x, data_y):
"""Get the data value at position (data_x, data_y). Indexes are
0-based, as in numpy.
"""
image = self.get_image()
if image is not None:
return image.get_data_xy(data_x, data_y)
raise ImageViewNoDataError("No image found")
def get_pixel_distance(self, x1, y1, x2, y2):
dx = abs(x2 - x1)
dy = abs(y2 - y1)
dist = math.sqrt(dx*dx + dy*dy)
dist = round(dist)
return dist
def scale_to(self, scale_x, scale_y, no_reset=False):
"""Scale the image in a channel.
Parameters
----------
chname: string
the name of the channel containing the image
scale_x: float
the scaling factor for the image in the X axis
scale_y: float
the scaling factor for the image in the Y axis
Returns
-------
0
See Also
--------
zoom_to
"""
ratio = float(scale_x) / float(scale_y)
if ratio < 1.0:
# Y is stretched
scale_x_base, scale_y_base = 1.0, 1.0 / ratio
else:
# X may be stretched
scale_x_base, scale_y_base = ratio, 1.0
if scale_x_base != scale_y_base:
zoomalg = 'rate'
else:
zoomalg = 'step'
self.t_.set(scale_x_base=scale_x_base, scale_y_base=scale_y_base,
#zoom_algorithm=zoomalg)
)
self._scale_to(scale_x, scale_y, no_reset=no_reset)
def _scale_to(self, scale_x, scale_y, no_reset=False):
# Check scale limits
maxscale = max(scale_x, scale_y)
if (maxscale > self.t_['scale_max']):
self.logger.error("Scale (%.2f) exceeds max scale limit (%.2f)" % (
maxscale, self.t_['scale_max']))
# TODO: exception?
return
minscale = min(scale_x, scale_y)
if (minscale < self.t_['scale_min']):
self.logger.error("Scale (%.2f) exceeds min scale limit (%.2f)" % (
minscale, self.t_['scale_min']))
# TODO: exception?
return
# Sanity check on the scale vs. window size
try:
win_wd, win_ht = self.get_window_size()
if (win_ht <= 0) or (win_ht <= 0):
# TODO: exception?
return
sx = float(win_wd) / scale_x
sy = float(win_ht) / scale_y
if (sx < 1.0) or (sy < 1.0):
new_scale_x = scale_x * sx
new_scale_y = scale_y * sy
self.logger.warn("scale adjusted downward X (%.4f -> %.4f), Y (%.4f -> %.4f)" % (
scale_x, new_scale_x, scale_y, new_scale_y))
scale_x, scale_y = new_scale_x, new_scale_y
except:
pass
self.t_.set(scale=(scale_x, scale_y))
# If user specified "override" or "once" for auto zoom, then turn off
# auto zoom now that they have set the zoom manually
if (not no_reset) and (self.t_['autozoom'] in ('override', 'once')):
self.t_.set(autozoom='off')
if self.t_['profile_use_scale']:
# Save scale with this image embedded profile
self.save_profile(scale_x=scale_x, scale_y=scale_y)
    def scale_cb(self, setting, value):
        """Callback invoked when the 'scale' setting changes; caches the
        scales, recomputes the matching zoom level and redraws.
        """
        scale_x, scale_y = self.t_['scale']
        self._scale_x = scale_x
        self._scale_y = scale_y
        if self.t_['zoom_algorithm'] == 'rate':
            # invert the rate formula: zoom = log_rate(scale / base)
            zoom_x = math.log(scale_x / self.t_['scale_x_base'],
                              self.t_['zoom_rate'])
            zoom_y = math.log(scale_y / self.t_['scale_y_base'],
                              self.t_['zoom_rate'])
            # TODO: avg, max?
            zoomlevel = min(zoom_x, zoom_y)
            #print "calc zoom_x=%f zoom_y=%f zoomlevel=%f" % (
            #    zoom_x, zoom_y, zoomlevel)
        else:
            maxscale = max(scale_x, scale_y)
            zoomlevel = maxscale
            if zoomlevel < 1.0:
                # scales < 1 are represented as negative reciprocal levels
                zoomlevel = - (1.0 / zoomlevel)
            #print "calc zoomlevel=%f" % (zoomlevel)
        self.t_.set(zoomlevel=zoomlevel)
        self.redraw(whence=0)
    def get_scale(self):
        """Return the current scale (the larger of the X/Y factors)."""
        return self.get_scale_max()
    def get_scale_max(self):
        """Return the larger of the two current scale factors."""
        #scalefactor = max(self._org_scale_x, self._org_scale_y)
        scalefactor = max(self._scale_x, self._scale_y)
        return scalefactor
    def get_scale_xy(self):
        """Return the current scale factors as (scale_x, scale_y)."""
        #return (self._org_scale_x, self._org_scale_y)
        return (self._scale_x, self._scale_y)
    def get_scale_base_xy(self):
        """Return the base (stretch-ratio) scale factors."""
        return (self.t_['scale_x_base'], self.t_['scale_y_base'])
    def set_scale_base_xy(self, scale_x_base, scale_y_base):
        """Set the base (stretch-ratio) scale factors."""
        self.t_.set(scale_x_base=scale_x_base, scale_y_base=scale_y_base)
def get_scale_text(self):
scalefactor = self.get_scale()
if scalefactor >= 1.0:
#text = '%dx' % (int(scalefactor))
text = '%.2fx' % (scalefactor)
else:
#text = '1/%dx' % (int(1.0/scalefactor))
text = '1/%.2fx' % (1.0/scalefactor)
return text
def zoom_to(self, zoomlevel, no_reset=False):
"""Set zoom level on channel.
Parameters
----------
zoomlevel: int
the zoom level to zoom the image: negative is out, positive is in
Returns
-------
0
Notes
-----
The zoom level is an integer that calculates a zoom level based on
the zoom settings defined for the channel in preferences.
See Also
--------
scale
"""
if self.t_['zoom_algorithm'] == 'rate':
scale_x = self.t_['scale_x_base'] * (
self.t_['zoom_rate'] ** zoomlevel)
scale_y = self.t_['scale_y_base'] * (
self.t_['zoom_rate'] ** zoomlevel)
else:
if zoomlevel >= 1.0:
scalefactor = zoomlevel
elif zoomlevel < -1.0:
scalefactor = 1.0 / float(abs(zoomlevel))
else:
scalefactor = 1.0
scale_x = scale_y = scalefactor
## print("scale_x=%f scale_y=%f zoom=%f" % (
## scale_x, scale_y, zoomlevel))
self._scale_to(scale_x, scale_y, no_reset=no_reset)
def zoom_in(self):
if self.t_['zoom_algorithm'] == 'rate':
self.zoom_to(self.t_['zoomlevel'] + 1)
else:
zl = int(self.t_['zoomlevel'])
if (zl >= 1) or (zl <= -3):
self.zoom_to(zl + 1)
else:
self.zoom_to(1)
def zoom_out(self):
if self.t_['zoom_algorithm'] == 'rate':
self.zoom_to(self.t_['zoomlevel'] - 1)
else:
zl = int(self.t_['zoomlevel'])
if zl == 1:
self.zoom_to(-2)
elif (zl >= 2) or (zl <= -2):
self.zoom_to(zl - 1)
else:
self.zoom_to(1)
    def zoom_fit(self, no_reset=False):
        """Center the image and scale it so that it fits entirely within
        the window, taking the current rotation into account.
        """
        # calculate actual width of the image, considering rotation
        try:
            width, height = self.get_data_size()
        except ImageViewNoDataError:
            return
        try:
            wwidth, wheight = self.get_window_size()
            self.logger.debug("Window size is %dx%d" % (wwidth, wheight))
            if self.t_['swap_xy']:
                wwidth, wheight = wheight, wwidth
        except:
            return
        # zoom_fit also centers image
        with self.suppress_redraw:
            self.center_image(no_reset=no_reset)
            # compute the bounding box of the rotated image corners
            ctr_x, ctr_y, rot_deg = self.get_rotation_info()
            min_x, min_y, max_x, max_y = 0, 0, 0, 0
            for x, y in ((0, 0), (width-1, 0), (width-1, height-1), (0, height-1)):
                x0, y0 = trcalc.rotate_pt(x, y, rot_deg, xoff=ctr_x, yoff=ctr_y)
                min_x, min_y = min(min_x, x0), min(min_y, y0)
                max_x, max_y = max(max_x, x0), max(max_y, y0)
            width, height = max_x - min_x, max_y - min_y
            if min(width, height) <= 0:
                return
            # Calculate optimum zoom level to still fit the window size
            if self.t_['zoom_algorithm'] == 'rate':
                scale_x = float(wwidth) / (float(width) * self.t_['scale_x_base'])
                scale_y = float(wheight) / (float(height) * self.t_['scale_y_base'])
                scalefactor = min(scale_x, scale_y)
                # account for t_[scale_x/y_base]
                scale_x = scalefactor * self.t_['scale_x_base']
                scale_y = scalefactor * self.t_['scale_y_base']
            else:
                scale_x = float(wwidth) / float(width)
                scale_y = float(wheight) / float(height)
                scalefactor = min(scale_x, scale_y)
                scale_x = scale_y = scalefactor
            self._scale_to(scale_x, scale_y, no_reset=no_reset)
        # a one-shot autozoom is consumed by this fit
        if self.t_['autozoom'] == 'once':
            self.t_.set(autozoom='off')
    def get_zoom(self):
        """Return the current zoom level."""
        return self.t_['zoomlevel']
    def get_zoomrate(self):
        """Return the rate used by the 'rate' zoom algorithm."""
        return self.t_['zoom_rate']
    def set_zoomrate(self, zoomrate):
        """Set the rate used by the 'rate' zoom algorithm."""
        self.t_.set(zoom_rate=zoomrate)
    def get_zoom_algorithm(self):
        """Return the name of the zoom algorithm ('step' or 'rate')."""
        return self.t_['zoom_algorithm']
    def set_zoom_algorithm(self, name):
        """Set the zoom algorithm; `name` must be 'step' or 'rate'."""
        name = name.lower()
        assert name in ('step', 'rate'), \
               ImageViewError("Alg '%s' must be one of: step, rate" % name)
        self.t_.set(zoom_algorithm=name)
    def zoomalg_change_cb(self, setting, value):
        # reapply the current zoom level under the new algorithm
        self.zoom_to(self.t_['zoomlevel'])
    def interpolation_change_cb(self, setting, value):
        # propagate the new interpolation method to the canvas image
        canvas_img = self.get_canvas_image()
        canvas_img.interpolation = value
        canvas_img.reset_optimize()
        self.redraw(whence=0)
    def set_name(self, name):
        """Set the name of this viewer."""
        self.name = name
    def get_scale_limits(self):
        """Return the scale limits as (scale_min, scale_max)."""
        return (self.t_['scale_min'], self.t_['scale_max'])
    def set_scale_limits(self, scale_min, scale_max):
        """Set the allowed scale range."""
        # TODO: force scale to within limits if already outside?
        self.t_.set(scale_min=scale_min, scale_max=scale_max)
def enable_autozoom(self, option):
option = option.lower()
assert(option in self.autozoom_options), \
ImageViewError("Bad autozoom option '%s': must be one of %s" % (
str(self.autozoom_options)))
self.t_.set(autozoom=option)
    def get_autozoom_options(self):
        """Return the list of valid autozoom option strings."""
        return self.autozoom_options
    def set_pan(self, pan_x, pan_y, coord='data', no_reset=False):
        """Set the pan position.  `coord` is 'data' or 'wcs'."""
        with self.suppress_redraw:
            self.t_.set(pan=(pan_x, pan_y), pan_coord=coord)
            # If user specified "override" or "once" for auto center, then turn off
            # auto center now that they have set the pan manually
            if (not no_reset) and (self.t_['autocenter'] in ('override', 'once')):
                self.t_.set(autocenter='off')
            if self.t_['profile_use_pan']:
                # Save pan position with this image embedded profile
                self.save_profile(pan_x=pan_x, pan_y=pan_y)
    def pan_cb(self, setting, value):
        """Callback invoked when the 'pan' setting changes; caches the
        position and redraws.
        """
        pan_x, pan_y = self.t_['pan']
        self._pan_x = pan_x
        self._pan_y = pan_y
        self.logger.debug("pan set to %.2f,%.2f" % (pan_x, pan_y))
        self.redraw(whence=0)
    def get_pan(self, coord='data'):
        """Return the pan position as (x, y) in the requested coordinate
        system ('data' or 'wcs').  Falls back to the stored values if no
        image is loaded or the conversion fails.
        """
        pan_x, pan_y = self._pan_x, self._pan_y
        if coord == 'wcs':
            if self.t_['pan_coord'] == 'data':
                image = self.get_image()
                if image is not None:
                    try:
                        return image.pixtoradec(pan_x, pan_y)
                    except Exception as e:
                        # no WCS, or conversion failed--fall through
                        pass
            # <-- data already in coordinates form
            return (pan_x, pan_y)
        # <-- requesting data coords
        if self.t_['pan_coord'] == 'data':
            return (pan_x, pan_y)
        image = self.get_image()
        if image is not None:
            try:
                return image.radectopix(pan_x, pan_y)
            except Exception as e:
                # conversion failed--fall through to stored values
                pass
        return (pan_x, pan_y)
    def panset_xy(self, data_x, data_y, no_reset=False):
        """Pan so that the data point (data_x, data_y) is centered,
        converting to WCS coordinates first if the pan coordinate system
        is 'wcs'.
        """
        pan_coord = self.t_['pan_coord']
        # To center on the pixel
        if pan_coord == 'wcs':
            image = self.get_image()
            if image is None:
                return
            pan_x, pan_y = image.pixtoradec(data_x, data_y)
        else:
            pan_x, pan_y = data_x, data_y
        self.set_pan(pan_x, pan_y, coord=pan_coord, no_reset=no_reset)
    def panset_pct(self, pct_x, pct_y):
        """Pan to the position given as fractions (0..1) of the data
        width/height.
        """
        try:
            width, height = self.get_data_size()
        except ImageViewNoDataError:
            return
        data_x, data_y = width * pct_x, height * pct_y
        self.panset_xy(data_x, data_y)
    def center_image(self, no_reset=True):
        """Pan to the geometric center of the image."""
        try:
            width, height = self.get_data_size()
        except ImageViewNoDataError:
            return
        data_x, data_y = float(width) / 2.0, float(height) / 2.0
        self.panset_xy(data_x, data_y, no_reset=no_reset)
        # See Footnote [1]
        ## if redraw:
        ##     self.redraw(whence=0)
        # a one-shot autocenter is consumed by this centering
        if self.t_['autocenter'] == 'once':
            self.t_.set(autocenter='off')
def set_autocenter(self, option):
option = option.lower()
assert(option in self.autocenter_options), \
ImageViewError("Bad autocenter option '%s': must be one of %s" % (
str(self.autocenter_options)))
self.t_.set(autocenter=option)
    def get_autocenter_options(self):
        """Return the list of valid autocenter option strings."""
        return self.autocenter_options
    def get_transforms(self):
        """Return the current transforms as (flip_x, flip_y, swap_xy)."""
        return (self.t_['flip_x'], self.t_['flip_y'], self.t_['swap_xy'])
    def get_cut_levels(self):
        """Return the current cut levels as (loval, hival)."""
        return self.t_['cuts']
    def cut_levels(self, loval, hival, no_reset=False):
        """Apply cut levels on the image view.

        Parameters
        ----------
        loval : float
            the low value of the cut levels
        hival : float
            the high value of the cut levels
        no_reset : bool
            if True, don't reset an 'override'/'once' autocuts setting
        """
        self.t_.set(cuts=(loval, hival))
        # If user specified "override" or "once" for auto levels,
        # then turn off auto levels now that they have set the levels
        # manually
        if (not no_reset) and (self.t_['autocuts'] in ('once', 'override')):
            self.t_.set(autocuts='off')
        if self.t_['profile_use_cuts']:
            # Save cut levels with this image embedded profile
            self.save_profile(cutlo=loval, cuthi=hival)
    def auto_levels(self, autocuts=None):
        """
        Apply an auto cut levels on the image view.

        Parameters
        ----------
        autocuts : a ginga.AutoCuts.* compatible object, optional
            An object that implements the auto cuts algorithms; defaults
            to the viewer's configured instance (self.autocuts)
        """
        if autocuts is None:
            autocuts = self.autocuts
        image = self.get_image()
        if image is None:
            # nothing to calculate levels from
            return
        loval, hival = autocuts.calc_cut_levels(image)
        # this will invoke cut_levels_cb()
        self.t_.set(cuts=(loval, hival))
        # If user specified "once" for auto levels, then turn off
        # auto levels now that we have cut levels established
        if self.t_['autocuts'] == 'once':
            self.t_.set(autocuts='off')
    def auto_levels_cb(self, setting, value):
        """Callback invoked when an autocuts-related setting changes;
        rebuilds the autocuts object if the method changed and
        recalculates the cut levels.
        """
        # Did we change the method?
        method = self.t_['autocut_method']
        params = self.t_.get('autocut_params', [])
        # TEMP: params is stored as a list of tuples
        params = dict(params)
        if method != str(self.autocuts):
            ac_class = AutoCuts.get_autocuts(method)
            self.autocuts = ac_class(self.logger, **params)
        else:
            # TODO: find a cleaner way to update these
            self.autocuts.__dict__.update(params)
        # Redo the auto levels
        #if self.t_['autocuts'] != 'off':
        # NOTE: users seems to expect that when the auto cuts parameters
        # are changed that the cuts should be immediately recalculated
        self.auto_levels()
    def cut_levels_cb(self, setting, value):
        # cut levels changed--only the color mapping step needs redoing
        self.redraw(whence=1)
def enable_autocuts(self, option):
option = option.lower()
assert(option in self.autocuts_options), \
ImageViewError("Bad autocuts option '%s': must be one of %s" % (
str(self.autocuts_options)))
self.t_.set(autocuts=option)
    def get_autocuts_options(self):
        """Return the list of valid autocuts option strings."""
        return self.autocuts_options
    def set_autocut_params(self, method, **params):
        """Select the autocuts algorithm `method` with keyword params."""
        self.logger.debug("Setting autocut params method=%s params=%s" % (
            method, str(params)))
        # stored in the settings as a list of (name, value) tuples
        params = list(params.items())
        self.t_.set(autocut_method=method, autocut_params=params)
    def get_autocut_methods(self):
        """Return the names of the available autocuts algorithms."""
        return self.autocuts.get_algorithms()
    def set_autocuts(self, autocuts):
        """
        Set the autocuts class instance that determines the algorithm used
        for calculating cut levels.
        """
        self.autocuts = autocuts
    def transform(self, flip_x, flip_y, swap_xy):
        """Transforms view of image.

        Parameters
        ----------
        flip_x: boolean
            if True, flip the image in the X axis
        flip_y: boolean
            if True, flip the image in the Y axis
        swap_xy: boolean
            if True, swap the X and Y axes

        Notes
        -----
        Transforming the image is generally faster than rotating,
        if rotating in 90 degree increments.

        See Also
        --------
        rotate
        """
        self.logger.debug("flip_x=%s flip_y=%s swap_xy=%s" % (
            flip_x, flip_y, swap_xy))
        with self.suppress_redraw:
            self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
            if self.t_['profile_use_transform']:
                # Save transform with this image embedded profile
                self.save_profile(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
    def transform_cb(self, setting, value):
        """Callback invoked when a flip/swap setting changes."""
        self.make_callback('transform')
        # whence=0 because need to calculate new extents for proper
        # cutout for rotation (TODO: always make extents consider
        # room for rotation)
        whence = 0
        self.redraw(whence=whence)
    def copy_attributes(self, dst_fi, attrlist):
        """Copy interesting attributes of our configuration to another
        instance of a ImageView.  `attrlist` selects which groups to copy
        ('transforms', 'rotation', 'cutlevels', 'rgbmap', 'zoom', 'pan').
        """
        with dst_fi.suppress_redraw:
            if 'transforms' in attrlist:
                dst_fi.transform(self.t_['flip_x'], self.t_['flip_y'],
                                 self.t_['swap_xy'])
            if 'rotation' in attrlist:
                dst_fi.rotate(self.t_['rot_deg'])
            if 'cutlevels' in attrlist:
                loval, hival = self.t_['cuts']
                dst_fi.cut_levels(loval, hival)
            if 'rgbmap' in attrlist:
                #dst_fi.set_rgbmap(self.rgbmap)
                dst_fi.rgbmap = self.rgbmap
            if 'zoom' in attrlist:
                dst_fi.zoom_to(self.t_['zoomlevel'])
            if 'pan' in attrlist:
                dst_fi.set_pan(self._pan_x, self._pan_y)
            dst_fi.redraw(whence=0)
    def get_rotation(self):
        """Return the current rotation in degrees."""
        return self.t_['rot_deg']
    def rotate(self, deg):
        """Rotates the view of image in channel.

        Parameters
        ----------
        deg: float
            degrees to rotate the image

        Notes
        -----
        Transforming the image is generally faster than rotating,
        if rotating in 90 degree increments.

        See Also
        --------
        transform
        """
        self.t_.set(rot_deg=deg)
        if self.t_['profile_use_rotation']:
            # Save rotation with this image embedded profile
            self.save_profile(rot_deg=deg)
    def rotation_change_cb(self, setting, value):
        """Callback invoked when the 'rot_deg' setting changes."""
        # whence=0 because need to calculate new extents for proper
        # cutout for rotation (TODO: always make extents consider
        # room for rotation)
        whence = 0
        self.redraw(whence=whence)
    def get_center(self):
        """Return the window center in canvas coordinates."""
        return (self._ctr_x, self._ctr_y)
    def get_rgb_order(self):
        """Return the channel order string used for rendering."""
        return 'RGB'
    def get_rotation_info(self):
        """Return (ctr_x, ctr_y, rot_deg)."""
        return (self._ctr_x, self._ctr_y, self.t_['rot_deg'])
    def enable_auto_orient(self, tf):
        """Enable or disable automatic orientation from image metadata."""
        self.t_.set(auto_orient=tf)
    def auto_orient(self):
        """Set the orientation for the image to a reasonable default.

        Non-FITS images get a Y-axis flip by default; an EXIF-style
        'Orientation' header, if present and parseable, takes precedence.
        """
        image = self.get_image()
        if image is None:
            return
        invertY = not isinstance(image, AstroImage.AstroImage)
        # Check for various things to set based on metadata
        header = image.get_header()
        if header:
            # Auto-orientation
            orient = header.get('Orientation', None)
            if not orient:
                orient = header.get('Image Orientation', None)
            self.logger.debug("orientation [%s]" % (
                orient))
            if orient:
                try:
                    orient = int(str(orient))
                    self.logger.info("setting orientation from metadata [%d]" % (
                        orient))
                    flip_x, flip_y, swap_xy = self.orientMap[orient]
                    self.transform(flip_x, flip_y, swap_xy)
                    invertY = False
                except Exception as e:
                    # problems figuring out orientation--let it be
                    self.logger.error("orientation error: %s" % str(e))
                    pass
        if invertY:
            flip_x, flip_y, swap_xy = self.get_transforms()
            #flip_y = not flip_y
            flip_y = True
            self.transform(flip_x, flip_y, swap_xy)
    def get_coordmap(self, key):
        """Return the coordinate mapper registered under `key`."""
        return self.coordmap[key]
    def set_coordmap(self, key, mapper):
        """Register coordinate mapper `mapper` under `key`."""
        self.coordmap[key] = mapper
    # TO BE DEPRECATED
    def enable_overlays(self, tf):
        self.t_.set(image_overlays=tf)
    # TO BE DEPRECATED
    def overlays_change_cb(self, setting, value):
        self.redraw(whence=2)
    def set_bg(self, r, g, b):
        """Set the background color.  Values r, g, b should be between
        0 and 1 inclusive.
        """
        self.img_bg = (r, g, b)
        self.redraw(whence=3)
    def get_bg(self):
        """Return the background color as an (r, g, b) tuple."""
        return self.img_bg
    def set_fg(self, r, g, b):
        """Set the foreground color.  Values r, g, b should be between
        0 and 1 inclusive.
        """
        self.img_fg = (r, g, b)
        self.redraw(whence=3)
    def get_fg(self):
        """Return the foreground color as an (r, g, b) tuple."""
        return self.img_fg
    def is_compound(self):
        # this is overridden by subclasses which can overplot objects
        return False
    def update_image(self):
        # abstract: widget-specific back ends must implement this
        self.logger.warn("Subclass should override this abstract method!")
    def render_image(self, rgbobj, dst_x, dst_y):
        # abstract: widget-specific back ends must implement this
        self.logger.warn("Subclass should override this abstract method!")
    def reschedule_redraw(self, time_sec):
        # abstract: widget-specific back ends must implement this
        self.logger.warn("Subclass should override this abstract method!")
## class SuppressRedraw(object):
## def __init__(self, viewer):
## self.viewer = viewer
## def __enter__(self):
## self.viewer._hold_redraw_cnt += 1
## return self
## def __exit__(self, exc_type, exc_val, exc_tb):
## self.viewer._hold_redraw_cnt -= 1
## if (self.viewer._hold_redraw_cnt <= 0):
## # TODO: whence should be largest possible
## # maybe self.viewer._defer_whence ??
## whence = 0
## self.viewer.redraw(whence=whence)
## return False
class SuppressRedraw(object):
    """No-op context manager handed out as `viewer.suppress_redraw`.

    Placeholder for a future implementation that would batch redraws
    while the context is held (see the commented-out version above);
    currently entering/exiting has no effect.
    """
    def __init__(self, viewer):
        self.viewer = viewer
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # returning False propagates any exception
        return False
# FOOTNOTES
# [1] This redraw is redundant due to the automatic redraw happening via
# a preferences callback. It is commented out, but left here in case
# we can/need to implement it again.
#END
|
eteq/ginga
|
ginga/ImageView.py
|
Python
|
bsd-3-clause
| 68,628
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import sys
def get_parser():
    """Build the command-line parser for the phonemizer script."""
    p = argparse.ArgumentParser(
        description="converts words to phones adding optional silences around in between words"
    )
    p.add_argument(
        "--sil-prob", "-s",
        type=float, default=0,
        help="probability of inserting silence between each word",
    )
    p.add_argument(
        "--surround",
        action="store_true",
        help="if set, surrounds each example with silence",
    )
    p.add_argument(
        "--lexicon",
        required=True,
        help="lexicon to convert to phones",
    )
    return p
def main():
    """Convert words on stdin to phone sequences using a lexicon.

    Lines containing any out-of-lexicon word are skipped entirely.
    With probability --sil-prob a silence token is inserted between
    adjacent words; --surround additionally brackets each line with
    silence.
    """
    args = get_parser().parse_args()
    sil = "<SIL>"

    # Load the lexicon: word -> list of phones.
    lexicon = {}
    with open(args.lexicon, "r") as lf:
        for raw in lf:
            fields = raw.rstrip().split()
            assert len(fields) > 1, raw
            assert fields[0] not in lexicon, fields
            lexicon[fields[0]] = fields[1:]

    for raw in sys.stdin:
        words = raw.strip().split()
        if not all(w in lexicon for w in words):
            # skip lines containing OOV words
            continue

        phones = []
        if args.surround:
            phones.append(sil)

        # one random draw per word gap, only when silences are enabled
        draws = None
        if args.sil_prob > 0 and len(words) > 1:
            draws = np.random.random(len(words) - 1)

        for idx, w in enumerate(words):
            phones.extend(lexicon[w])
            if (
                draws is not None
                and idx < len(draws)
                and draws[idx] < args.sil_prob
            ):
                phones.append(sil)

        if args.surround:
            phones.append(sil)
        print(" ".join(phones))
# script entry point
if __name__ == "__main__":
    main()
|
pytorch/fairseq
|
examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py
|
Python
|
mit
| 2,045
|
# -*- coding: utf-8 -*-
#
# This file is part of hopr: https://github.com/hopr/hopr.
#
# Hopr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hopr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hopr. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest as ut
import tempfile
from mock import MagicMock, sentinel, call
from hopr.tool.run import * # Run, parse_args, run
# TODO: Suppress log output during tests.
class Test1Misc(ut.TestCase):
    """Tests for small helpers (Timeout)."""
    def test_timeout(self):
        # Timeout(dt) should report False until dt seconds have elapsed
        # and True afterwards; e is a small slack for timer jitter.
        dt = 0.01
        e = 0.001
        timeout = Timeout(dt)
        t1 = time()
        while(True):
            a = timeout()
            t2 = time()
            if t2 - t1 < dt-e:
                self.assertEqual(a, False)
            else:
                break
        # busy-wait until we are safely past the deadline
        while(t2 - t1 <= dt + e):
            t2 = time()
        self.assertEqual(timeout(), True)
class TestParseArgs(ut.TestCase):
    """Tests for the command-line argument parser."""
    def setUp(self):
        # expected defaults; individual tests update the keys they override
        self.args = {'no_grab': False,
                     'timeout': 5,
                     'log_level': 'info',
                     'print_keymap': False,
                     'log_file': '',
                     'config_dir': '',
                     }
    def test2_parse_args(self):
        x = parse_args('--no-grab -t 10 --log-level warning'.split())
        self.args.update({'no_grab': True,
                          'timeout': 10,
                          'log_level': 'warning',
                          })
        self.assertEqual(self.args, vars(x))
    def test2_no_timeout(self):
        x = parse_args('-x'.split())
        self.args.update({'timeout': 0})
        self.assertEqual(self.args, vars(x))
    def test1_parse_args_defaults(self):
        x = parse_args(''.split())
        self.assertEqual({'no_grab': False,
                          'timeout': 5,
                          'log_level': 'info',
                          'log_file': '',
                          'config_dir': '',
                          'print_keymap': False,
                          }, vars(x))
    def test1_parse_args_log_file(self):
        # BUG FIX: this test was also named test1_parse_args_defaults,
        # silently shadowing the test above so it never ran.  Renamed so
        # both tests execute.
        x = parse_args('--log-file log.txt'.split())
        self.assertEqual({'no_grab': False,
                          'timeout': 5,
                          'log_level': 'info',
                          'log_file': 'log.txt',
                          'config_dir': '',
                          'print_keymap': False,
                          }, vars(x))
class TestRun(ut.TestCase):
    """Tests for the run() event loop using mocked collaborators."""
    def setUp(self):
        # Bind run() to a full set of mocks; keep each mock as an
        # attribute so tests can inspect its calls.
        params = dict(event_parser=MagicMock(name='parser'),
                      event_wrapper=MagicMock(name='event_wrapper'),
                      find_keyboards=MagicMock(name='find_keyboards'),
                      read_events=MagicMock(name='read_events'),
                      grab_keyboards=MagicMock(name='grab_keyboards'))
        for k,v in list(params.items()):
            setattr(self, k, v)
        self.run = partial(run, **params)
    def test1_no_events(self):
        # run() should terminate cleanly when no events arrive
        self.run(timeout=5,
                 no_grab=True)
    def test2_keyboards_are_optionally_grabbed(self):
        kbds = [sentinel.kbd1, sentinel.kbd2]
        self.find_keyboards.return_value = kbds
        self.run(no_grab=True)
        self.grab_keyboards.assert_not_called()
        self.run(no_grab=False)
        self.grab_keyboards.assert_called_once_with(kbds)
    def test2_keyboards_events_are_read(self):
        kbds = [sentinel.kbd1, sentinel.kbd2]
        self.find_keyboards.return_value = kbds
        self.run()
        self.read_events.assert_called_once_with(kbds)
    def test2_events_are_wrapped_before_parsing(self):
        events = [sentinel.event]
        self.read_events.return_value = events
        self.event_wrapper.return_value = sentinel.wrapped_event
        self.run()
        self.event_wrapper.assert_called_once_with(sentinel.event)
        self.event_parser.assert_called_once_with(sentinel.wrapped_event)
    def test2_events_are_sent_to_parser(self):
        events = [sentinel.event1, sentinel.event2]
        self.read_events.return_value = events
        # identity wrapper so the parser sees the raw events
        self.event_wrapper.side_effect = lambda x : x
        self.run()
        self.event_parser.assert_has_calls([call(e) for e in events])
    def test3_timeout(self):
        # a negative timeout should still terminate
        self.run(timeout=-1)
class TestRunFunction(ut.TestCase):
    """Tests for run_parse_args(), the CLI entry point."""
    def test(self):
        backend = MagicMock(name='backend')
        make_eventparser = MagicMock(name='make_eventparser')
        args = '--log-level=error'.split()
        run_parse_args(backend=backend,
                       make_eventparser=make_eventparser,
                       args=args)
    def test_log_file(self):
        # --log-file should route log records into the given file
        f = tempfile.NamedTemporaryFile('r')
        backend = MagicMock(name='backend')
        make_eventparser = MagicMock(name='make_eventparser')
        args = ['--log-level', 'debug', '--log-file', f.name]
        run_parse_args(backend=backend,
                       make_eventparser=make_eventparser,
                       args=args)
        logging.getLogger().debug('Test Message')
        text = f.read()
        self.assertTrue(text.strip().endswith('Test Message'))
if __name__ == "__main__":
#    import logging
#    logging.getLogger().setLevel('ERROR')
    # failfast stops at the first failure; exit=False keeps the
    # interpreter usable when run interactively
    ut.main(failfast=True, exit=False)
|
hopr/hopr
|
hopr/tool/test_run.py
|
Python
|
gpl-3.0
| 5,871
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from pants.base.exceptions import TaskError
from pants.reporting.reporting_server import ReportingServerManager
from pants.task.task import QuietTaskMixin, Task
from pants.util import desktop
logger = logging.getLogger(__name__)
class ReportingServerRun(QuietTaskMixin, Task):
    """Run the reporting server."""

    @classmethod
    def register_options(cls, register):
        """Register this task's command-line options."""
        super().register_options(register)
        register(
            "--port",
            type=int,
            default=0,
            help="Serve on this port. Leave unset to choose a free port "
            "automatically (recommended if using pants concurrently in "
            "multiple workspaces on the same host).",
        )
        register(
            "--allowed-clients",
            type=list,
            default=["127.0.0.1"],
            help="Only requests from these IPs may access this server. Useful for "
            "temporarily showing build results to a colleague. The special "
            "value ALL means any client may connect. Use with caution, as "
            "your source code is exposed to all allowed clients!",
        )
        register("--open", type=bool, help="Attempt to open the server web ui in a browser.")
        register(
            "--template-dir",
            advanced=True,
            help="Use templates from this dir instead of the defaults.",
        )
        register(
            "--assets-dir", advanced=True, help="Use assets from this dir instead of the defaults."
        )

    def _maybe_open(self, port):
        """Open the web UI in a browser if --open was given."""
        if self.get_options().open:
            try:
                desktop.ui_open("http://localhost:{port}".format(port=port))
            except desktop.OpenError as e:
                raise TaskError(e)

    def execute(self):
        """Start (or report on an already-running) reporting server."""
        manager = ReportingServerManager(self.context, self.get_options())

        if manager.is_alive():
            # idempotent: re-running the task just reports the live server
            logger.info(
                "Server already running with pid {pid} at http://localhost:{port}".format(
                    pid=manager.pid, port=manager.socket
                )
            )
        else:
            manager.daemonize()
            # wait briefly for the daemon to bind its socket
            manager.await_socket(10)
            logger.info(
                f"Launched server with pid {manager.pid} at http://localhost:{manager.socket}"
            )
            logger.info(f"To kill, run `{self.get_options().pants_bin_name} killserver`")
        self._maybe_open(manager.socket)
|
tdyas/pants
|
src/python/pants/core_tasks/reporting_server_run.py
|
Python
|
apache-2.0
| 2,593
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
    """ExpressRouteCrossConnectionPeeringsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # NOTE: AutoRest-generated code -- hand edits are lost on regeneration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExpressRouteCrossConnectionPeeringList"]:
        """Gets all peerings in a specified ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeeringList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for one page. The service-supplied next_link is
            # already a complete URL, so only the first page is built from the template.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, items for this page).
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; any status other than 200 is raised as an ARM-formatted error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the raw DELETE request that starts the long-running delete.
        200/202/204 are the accepted initial responses; anything else raises.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified peering from the ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; cls=lambda keeps the raw pipeline response for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the caller-supplied transform, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRouteCrossConnectionPeering":
        """Gets the specified peering for the ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
        **kwargs: Any
    ) -> "_models.ExpressRouteCrossConnectionPeering":
        """Issue the raw PUT request that starts the long-running create/update.
        200 and 201 are the accepted initial responses; both carry a peering body.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionPeering"]:
        """Creates or updates a peering in the specified ExpressRouteCrossConnection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param peering_parameters: Parameters supplied to the create or update
         ExpressRouteCrossConnection peering operation.
        :type peering_parameters: ~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.ExpressRouteCrossConnectionPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; cls=lambda keeps the raw pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                peering_parameters=peering_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response, then apply the caller-supplied transform.
            deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'}  # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_express_route_cross_connection_peerings_operations.py
|
Python
|
mit
| 22,362
|
import os
# Label attached to Docker containers spawned by the export tooling so they
# can be found (and cleaned up) later.
COURSERA_DOCKER_LABEL = 'courseraResearchExport'
# Local directory where downloaded export artifacts are kept.
COURSERA_LOCAL_FOLDER = os.path.expanduser('~/.coursera/exports/')
# Docker image used to host exported databases.
POSTGRES_DOCKER_IMAGE = 'postgres:9.5'
# Log lines emitted by the postgres container -- presumably watched to detect
# container readiness; confirm against the container-launch code.
POSTGRES_INIT_MSG = 'PostgreSQL init process complete; ready for start up.'
POSTGRES_READY_MSG = 'database system is ready to accept connections'
|
coursera/courseraresearchexports
|
courseraresearchexports/constants/container_constants.py
|
Python
|
apache-2.0
| 312
|
# NOTE(review): this file is IDE refactoring-test fixture data (the expected
# "after" state of an introduce-constant refactoring); any edits here must be
# mirrored in the test's expectations.
X = 42
N = 42
A = 11
M = 24
K = 21
T = 28
# `a` is deliberately inserted after all globals it depends on (N, M, T).
a = N + M + T + 1
O = 22
def f():
    print(a)
|
jwren/intellij-community
|
python/testData/refactoring/introduceConstant/insertAfterAllGlobalVariablesOnWhichDepends.after.py
|
Python
|
apache-2.0
| 90
|
#! /usr/local/bin/python3
from discoursesso import DiscourseSSO
"""
These are the credentials used in the example
https://meta.discourse.org/t/official-single-sign-on-for-discourse/13045
"""
# Example payload/secret/signature triple from the Discourse SSO walkthrough
# referenced in the module docstring -- not real credentials.
payload = "bm9uY2U9Y2I2ODI1MWVlZmI1MjExZTU4YzAwZmYxMzk1ZjBjMGI%3D%0A"
secret_key = "d836444a9e4084d5b224a60c208dce14"
sig = "2828aa29899722b35a2f191d34ef9b3ce695e0e6eeec47deb46d588d70c7cb56"
# Minimal credential set Discourse requires for an SSO login.
min_req_credentials = {
    "external_id": "welenofsky",
    "nonce": "aod0f9ahdfha9d8hf8a",
    "email": "email@example.com"
}
sso = DiscourseSSO(secret_key)
# NOTE(review): validate() presumably raises when the HMAC signature does not
# match the payload -- confirm against DiscourseSSO's implementation.
sso.validate(payload, sig)
print("Nonce From Payload: ", sso.get_nonce(payload))
print("Generated Login URL:")
print("http://discuss.example.com/session/sso_login?%s" % sso.build_login_URL(min_req_credentials))
|
ravikiranj/discourse-sso-python-ldap
|
runner.py
|
Python
|
bsd-2-clause
| 765
|
from pywb.utils.canonicalize import UrlCanonicalizer
from pywb.utils.wbexception import NotFoundException
from pywb.cdx.cdxops import cdx_load
from pywb.cdx.cdxsource import CDXSource, CDXFile, RemoteCDXSource, RedisCDXSource
from pywb.cdx.zipnum import ZipNumCluster
from pywb.cdx.cdxobject import CDXObject, CDXException
from pywb.cdx.query import CDXQuery
from pywb.cdx.cdxdomainspecific import load_domain_specific_cdx_rules
from pywb.utils.loaders import is_http
from itertools import chain
import logging
import os
#=================================================================
class BaseCDXServer(object):
    """Common behavior for cdx servers: URL canonicalization setup,
    fuzzy-match retry, and not-found handling for empty result sets."""
    def __init__(self, **kwargs):
        rules_file = kwargs.get('ds_rules_file')
        surt_ordered = kwargs.get('surt_ordered', True)
        if rules_file:
            # Domain-specific rules supply both the canonicalizer and the fuzzy matcher.
            canon, fuzzy = load_domain_specific_cdx_rules(rules_file, surt_ordered)
        else:
            # Otherwise accept a caller-provided canonicalizer/fuzzy matcher.
            canon = kwargs.get('url_canon')
            fuzzy = kwargs.get('fuzzy_query')
        self.fuzzy_query = fuzzy
        # Fall back to the default canonicalizer when none was configured.
        self.url_canon = canon if canon else UrlCanonicalizer(surt_ordered)
    def _check_cdx_iter(self, cdx_iter, query):
        """ Check cdx iter semantics
        If `cdx_iter` is empty (no matches), check if fuzzy matching
        is allowed, and try it -- otherwise,
        throw :exc:`~pywb.utils.wbexception.NotFoundException`
        """
        peeked = self.peek_iter(cdx_iter)
        if peeked:
            return peeked
        # Only retry fuzzily for exact queries when a fuzzy matcher is configured
        # and the query permits it.
        if self.fuzzy_query and query.allow_fuzzy and query.is_exact:
            retry_params = self.fuzzy_query(query)
            if retry_params:
                return self.load_cdx(**retry_params)
        msg = 'No Captures found for: ' + query.url
        if not query.is_exact:
            msg += ' (' + query.match_type + ' query)'
        raise NotFoundException(msg, url=query.url)
    def load_cdx(self, **params):
        """Run a cdx query built from ``params``; returns a non-empty iterator
        or raises NotFoundException (possibly after a fuzzy retry)."""
        params['_url_canon'] = self.url_canon
        query = CDXQuery(params)
        return self._check_cdx_iter(self._load_cdx_query(query), query)
    def _load_cdx_query(self, query):  # pragma: no cover
        raise NotImplementedError('Implement in subclass')
    @staticmethod
    def peek_iter(iterable):
        """Return None if ``iterable`` is empty, else an equivalent iterator
        with the consumed first element re-attached."""
        try:
            head = next(iterable)
        except StopIteration:
            return None
        return chain([head], iterable)
#=================================================================
class CDXServer(BaseCDXServer):
    """
    Top-level cdx server object which maintains a list of cdx sources,
    responds to queries and dispatches to the cdx ops for processing
    """
    def __init__(self, paths, **kwargs):
        super(CDXServer, self).__init__(**kwargs)
        # TODO: we could save config in member, so that other
        # methods can use it. it's bad for add_cdx_source to take
        # config argument.
        self._create_cdx_sources(paths, kwargs.get('config'))
    def _load_cdx_query(self, query):
        """
        load CDX for query parameters ``params``.
        ``key`` (or ``url``) parameter specifies URL to query,
        ``matchType`` parameter specifies matching method for ``key``
        (default ``exact``).
        other parameters are passed down to :func:`cdx_load`.
        raises :exc:`~pywb.utils.wbexception.NotFoundException`
        if no captures are found.
        :param query: query parameters
        :type query: :class:`~pywb.cdx.query.CDXQuery`
        :rtype: iterator on :class:`~pywb.cdx.cdxobject.CDXObject`
        """
        return cdx_load(self.sources, query)
    def _create_cdx_sources(self, paths, config):
        """
        build CDXSource instances for each of path in ``paths``.
        :param paths: list of sources or single source.
        each source may be either string or CDXSource instance. value
        of any other types will be silently ignored.
        :param config: config object passed to :method:`add_cdx_source`.
        """
        self.sources = []
        if paths is not None:
            if not isinstance(paths, (list, tuple)):
                paths = [paths]
            for path in paths:
                self.add_cdx_source(path, config)
        if not self.sources:
            # logging.warn() is a deprecated alias (removed in Python 3.13);
            # use logging.warning() instead.
            logging.warning('No CDX Sources configured from paths=%s', paths)
    def _add_cdx_source(self, source):
        """Append a constructed source, silently ignoring None placeholders."""
        if source is None:
            return
        logging.debug('Adding CDX Source: %s', source)
        self.sources.append(source)
    def add_cdx_source(self, source, config):
        """Add one source: a ready :class:`CDXSource`, or a path/URI string.
        A directory path is expanded to one source per contained file."""
        if isinstance(source, CDXSource):
            self._add_cdx_source(source)
        elif isinstance(source, str):
            if os.path.isdir(source):
                for fn in os.listdir(source):
                    self._add_cdx_source(self._create_cdx_source(
                        os.path.join(source, fn), config))
            else:
                self._add_cdx_source(self._create_cdx_source(
                    source, config))
    def _create_cdx_source(self, filename, config):
        """Map a path/URI to the matching CDXSource implementation, or None
        if the name is unrecognized (with a warning, except for zipnum parts)."""
        if is_http(filename):
            return RemoteCDXSource(filename)
        if filename.startswith('redis://'):
            return RedisCDXSource(filename, config)
        if filename.endswith(('.cdx', '.cdxj')):
            return CDXFile(filename)
        if filename.endswith(('.summary', '.idx')):
            return ZipNumCluster(filename, config)
        # no warning for .loc or .gz (zipnum)
        if not filename.endswith(('.loc', '.gz')):
            logging.warning('skipping unrecognized URI: %s', filename)
        return None
#=================================================================
class RemoteCDXServer(BaseCDXServer):
    """A cdx server backed by exactly one
    :class:`~pywb.cdx.cdxsource.RemoteCDXSource`: queries are proxied to the
    remote endpoint with no local filtering or processing."""
    def __init__(self, source, **kwargs):
        super(RemoteCDXServer, self).__init__(**kwargs)
        # Accept either a ready-made remote source or an http(s) URL string.
        if isinstance(source, RemoteCDXSource):
            self.source = source
            return
        if isinstance(source, str) and is_http(source):
            self.source = RemoteCDXSource(source, remote_processing=True)
            return
        raise Exception('Invalid remote cdx source: ' + str(source))
    def _load_cdx_query(self, query):
        # process=False: the remote end applies its own query processing.
        return cdx_load([self.source], query, process=False)
#=================================================================
def create_cdx_server(config, ds_rules_file=None, server_cls=None):
    """Factory for a cdx server.

    ``config`` may be a dict-like object (read ``index_paths`` and
    ``surt_ordered`` from it) or a plain paths value. Unless ``server_cls``
    is given, a RemoteCDXServer is chosen for an http URL or
    RemoteCDXSource, otherwise a local CDXServer.
    """
    dict_like = hasattr(config, 'get')
    paths = config.get('index_paths') if dict_like else config
    surt_ordered = config.get('surt_ordered', True) if dict_like else True
    pass_config = config if dict_like else None
    logging.debug('CDX Surt-Ordered? ' + str(surt_ordered))
    if not server_cls:
        is_remote = ((isinstance(paths, str) and is_http(paths)) or
                     isinstance(paths, RemoteCDXSource))
        server_cls = RemoteCDXServer if is_remote else CDXServer
    return server_cls(paths,
                      config=pass_config,
                      surt_ordered=surt_ordered,
                      ds_rules_file=ds_rules_file)
|
pombredanne/pywb
|
pywb/cdx/cdxserver.py
|
Python
|
gpl-3.0
| 7,874
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: tst-a1.py 56295 2015-06-09 14:29:55Z vboxsync $
"""
Analyzer Experiment 1.
"""
__copyright__ = \
"""
Copyright (C) 2010-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 56295 $"
import os.path
import sys
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testanalysis import reader ## @todo fix testanalysis/__init__.py.
from testanalysis import reporting
from testanalysis import diff
def usage():
    """ Display usage. """
    # Python 2 source; prints to stdout and returns 1 so callers can use the
    # result directly as a process exit status.
    print 'usage: %s [options] <testresults.xml> [baseline.xml]' % (sys.argv[0]);
    print ''
    print 'options:'
    print ' --filter <test-sub-string>'
    return 1;
def main(asArgs):
    """ C style main(): parses argv-style args, loads a test-result XML
    (optionally diffed against a baseline XML), and prints a text report.
    Returns a process exit status (0 on success). """
    # Parse arguments
    sTestFile = None;
    sBaseFile = None;
    asFilters = [];
    iArg = 1;
    while iArg < len(asArgs):
        if asArgs[iArg] == '--filter':
            # NOTE(review): '--filter' as the last argument raises IndexError
            # here rather than a usage message -- presumably acceptable for a
            # developer tool; confirm.
            iArg += 1;
            asFilters.append(asArgs[iArg]);
        elif asArgs[iArg].startswith('--help'):
            return usage();
        elif asArgs[iArg].startswith('--'):
            print 'syntax error: unknown option "%s"' % (asArgs[iArg]);
            return usage();
        elif sTestFile is None:
            # First positional argument: the test results file.
            sTestFile = asArgs[iArg];
        elif sBaseFile is None:
            # Second positional argument: the optional baseline file.
            sBaseFile = asArgs[iArg];
        else:
            print 'syntax error: too many file names: %s' % (asArgs[iArg])
            return usage();
        iArg += 1;
    # Down to business
    oTestTree = reader.parseTestResult(sTestFile);
    if oTestTree is None:
        return 1;
    # Keep only tests matching the --filter substrings.
    oTestTree = oTestTree.filterTests(asFilters)
    if sBaseFile is not None:
        oBaseline = reader.parseTestResult(sBaseFile);
        if oBaseline is None:
            return 1;
        # Replace the tree with its diff against the baseline.
        oTestTree = diff.baselineDiff(oTestTree, oBaseline);
        if oTestTree is None:
            return 1;
    reporting.produceTextReport(oTestTree);
    return 0;
if __name__ == '__main__':
sys.exit(main(sys.argv));
|
carmark/vbox
|
src/VBox/ValidationKit/testanalysis/tst-a1.py
|
Python
|
gpl-2.0
| 3,077
|
#!/usr/bin/env python
from builtins import map
import os
import sys
from PyAnalysisTools.AnalysisTools.CutFlowAnalyser import CutflowAnalyser as ca
from PyAnalysisTools.AnalysisTools.CutFlowAnalyser import ExtendedCutFlowAnalyser as eca
from PyAnalysisTools import base
try:
from tabulate.tabulate import tabulate_formats
except ImportError:
from tabulate import tabulate_formats
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
def main(argv):
    """Entry point: build the argument parser, then run the (extended)
    cutflow analyser and print the resulting cutflow table.

    :param argv: command-line arguments (without the program name).
    """
    parser = base.get_default_argparser("Cutflow printer")
    # Shared argument groups provided by the framework.
    base.add_input_args(parser)
    base.add_output_args(parser)
    base.add_process_args(parser)
    base.add_friend_args(parser)
    base.add_selection_args(parser)
    parser.add_argument('--config_file', '-cf', default=None, help='config file')
    # NOTE(review): single leading dash ('-disable_interactive') while every
    # other long option uses '--'; also the adjacent string literals
    # concatenate to "Disable interactivemode" (missing space) -- confirm
    # whether either is intentional before changing (users may rely on the
    # current flag spelling).
    parser.add_argument('-disable_interactive', '-di', action='store_true', default=False, help="Disable interactive"
                                                                                                "mode")
    parser.add_argument('--enable_signal_plots', '-esp', action='store_true', default=False, help='Enable plots for '
                                                                                                  'signal efficiency')
    parser.add_argument('--disable_sm_total', '-dsm', default=False, action='store_true',
                        help="disable summing sm total")
    parser.add_argument('--format', '-f', choices=list(map(str, tabulate_formats)),
                        help="format of printed table")
    parser.add_argument('--no_merge', '-n', action='store_true', default=False, help="switch off merging")
    parser.add_argument('--precision', '-p', type=int, default=3, help="precision of printed numbers")
    parser.add_argument('--raw', '-r', action='store_true', default=False, help="print raw cutflow")
    parser.add_argument('--systematics', '-s', nargs='+', default=['Nominal'], help="systematics")
    parser.add_argument('--module_config_files', '-mcf', nargs='+', default=None,
                        help='config of additional modules to apply')
    parser.add_argument('--enable_eff', '-ee', action='store_true', default=False, help='Enable cut efficiencies')
    parser.add_argument('--percent_eff', '-per', action='store_true', default=False,
                        help='Calculate cut efficiencies in percent')
    parser.add_argument('--save_table', '-st', action='store_true', default=False, help='store cutflow to file')
    parser.add_argument('--output_tag', default=None, help='additional tag for file names storing enabled')
    parser.add_argument("--disable_cutflow_reading", "-dcr", action='store_true', default=False,
                        help="disable reading of initial cutflows. Lumi weighting won't work apparently.")
    args = base.default_init(parser)
    # Resolve input files to absolute paths before handing off to the analyser.
    args.file_list = [os.path.abspath(f) for f in args.input_file_list]
    # Plain analyser when no selection config is given; extended one otherwise.
    if args.selection_config is None:
        cutflow_analyser = ca(**vars(args))
    else:
        cutflow_analyser = eca(**vars(args))
    cutflow_analyser.execute()
    cutflow_analyser.print_cutflow_table()
if __name__ == '__main__':
main(sys.argv[1:])
|
morgenst/PyAnalysisTools
|
run_scripts/print_cutflow.py
|
Python
|
mit
| 3,152
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for interacting with the `tf.Session`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SessionBenchmark(test.Benchmark):
  """Tests and benchmarks for interacting with the `tf.Session`.

  Each `_benchmark*` helper runs a tight timing loop around a single
  session interaction (feed, fetch, run-op) and reports the median
  wall time via `report_benchmark`.

  NOTE: the original code iterated with `xrange`, which is never imported
  in this module and does not exist on Python 3 (NameError). The builtin
  `range` is used instead; it behaves identically here on Python 2 and 3.
  """

  def _benchmarkFeed(self, name, target, size, iters):
    """Runs a microbenchmark to measure the cost of feeding a tensor.

    Reports the median cost of feeding a tensor of `size` * `sizeof(float)`
    bytes.

    Args:
      name: A human-readable name for logging the output.
      target: The session target to use for the benchmark.
      size: The number of floating-point numbers to be feed.
      iters: The number of iterations to perform.
    """
    feed_val = np.random.rand(size).astype(np.float32)
    times = []
    with ops.Graph().as_default():
      p = array_ops.placeholder(dtypes.float32, shape=[size])
      # Fetch the operation rather than the tensor, to avoid measuring the time
      # to fetch back the value.
      no_op = array_ops.identity(p).op
      with session.Session(target) as sess:
        sess.run(no_op, feed_dict={p: feed_val})  # Warm-up run.
        for _ in range(iters):
          start_time = time.time()
          sess.run(no_op, feed_dict={p: feed_val})
          end_time = time.time()
          times.append(end_time - start_time)
    print("%s %d %f" % (name, size, np.median(times)))
    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)

  def _benchmarkFetch(self, name, target, size, iters):
    """Runs a microbenchmark to measure the cost of fetching a tensor.

    Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
    bytes.

    Args:
      name: A human-readable name for logging the output.
      target: The session target to use for the benchmark.
      size: The number of floating-point numbers to be fetched.
      iters: The number of iterations to perform.
    """
    times = []
    with ops.Graph().as_default():
      # Define the tensor to be fetched as a variable, to avoid
      # constant-folding.
      v = variables.Variable(random_ops.random_normal([size]))
      with session.Session(target) as sess:
        sess.run(v.initializer)
        sess.run(v)  # Warm-up run.
        for _ in range(iters):
          start_time = time.time()
          sess.run(v)
          end_time = time.time()
          times.append(end_time - start_time)
    print("%s %d %f" % (name, size, np.median(times)))
    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)

  def _benchmarkFetchPrebuilt(self, name, target, size, iters):
    """Runs a microbenchmark to measure the cost of fetching a tensor.

    Same as `_benchmarkFetch` but uses a prebuilt callable
    (`Session.make_callable`) so per-call graph setup is excluded.

    Args:
      name: A human-readable name for logging the output.
      target: The session target to use for the benchmark.
      size: The number of floating-point numbers to be fetched.
      iters: The number of iterations to perform.
    """
    times = []
    with ops.Graph().as_default():
      # Define the tensor to be fetched as a variable, to avoid
      # constant-folding.
      v = variables.Variable(random_ops.random_normal([size]))
      with session.Session(target) as sess:
        sess.run(v.initializer)
        runner = sess.make_callable(v)
        runner()  # Warm-up run.
        for _ in range(iters):
          start_time = time.time()
          runner()
          end_time = time.time()
          times.append(end_time - start_time)
    print("%s %d %f" % (name, size, np.median(times)))
    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)

  def _benchmarkRunOp(self, name, target, iters):
    """Runs a microbenchmark to measure the cost of running an op.

    Reports the median cost of running a trivial (Variable) op.

    Args:
      name: A human-readable name for logging the output.
      target: The session target to use for the benchmark.
      iters: The number of iterations to perform.
    """
    times = []
    with ops.Graph().as_default():
      # Define the op to be run as a variable, to avoid
      # constant-folding.
      v = variables.Variable(random_ops.random_normal([]))
      with session.Session(target) as sess:
        sess.run(v.initializer)
        sess.run(v.op)  # Warm-up run.
        for _ in range(iters):
          start_time = time.time()
          sess.run(v.op)
          end_time = time.time()
          times.append(end_time - start_time)
    print("%s %f" % (name, np.median(times)))
    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)

  def _benchmarkRunOpPrebuilt(self, name, target, iters):
    """Runs a microbenchmark to measure the cost of running an op.

    Same as `_benchmarkRunOp` but uses a prebuilt callable
    (`Session.make_callable`).

    Args:
      name: A human-readable name for logging the output.
      target: The session target to use for the benchmark.
      iters: The number of iterations to perform.
    """
    times = []
    with ops.Graph().as_default():
      # Define the op to be run as a variable, to avoid
      # constant-folding.
      v = variables.Variable(random_ops.random_normal([]))
      with session.Session(target) as sess:
        sess.run(v.initializer)
        runner = sess.make_callable(v.op)
        runner()  # Warm-up run.
        for _ in range(iters):
          start_time = time.time()
          runner()
          end_time = time.time()
          times.append(end_time - start_time)
    print("%s %f" % (name, np.median(times)))
    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)

  def benchmarkGrpcSession(self):
    """Benchmarks all interaction styles against a local gRPC server.

    The server session state is reset between benchmarks so each
    measurement starts from a clean session.
    """
    server = server_lib.Server.create_local_server()
    self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1,
                        30000)
    session.Session.reset(server.target)
    self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target,
                        1 << 20, 25000)
    session.Session.reset(server.target)
    self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1,
                         40000)
    session.Session.reset(server.target)
    self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target,
                         1 << 20, 20000)
    session.Session.reset(server.target)
    self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4B",
                                 server.target, 1, 50000)
    session.Session.reset(server.target)
    self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4MB",
                                 server.target, 1 << 20, 50000)
    session.Session.reset(server.target)
    self._benchmarkRunOp("benchmark_session_runop_grpc", server.target, 50000)
    session.Session.reset(server.target)
    self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_grpc",
                                 server.target, 100000)
    session.Session.reset(server.target)

  def benchmarkDirectSession(self):
    """Benchmarks all interaction styles against an in-process session."""
    self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 80000)
    self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 20000)
    self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 100000)
    self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20,
                         20000)
    self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4B",
                                 "", 1, 200000)
    self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4MB",
                                 "", 1 << 20, 200000)
    self._benchmarkRunOp("benchmark_session_runop_direct", "", 200000)
    self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_direct", "",
                                 200000)
# Run the benchmarks through the TensorFlow test/benchmark runner when
# this module is executed directly.
if __name__ == "__main__":
  test.main()
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/python/client/session_benchmark.py
|
Python
|
apache-2.0
| 8,973
|
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import datetime
import sys
import operator
import warnings
import pytest
import shutil
import tempfile
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
# pytz is an optional dependency; timezone-aware datetime64 tests are
# skipped when it is unavailable (see TestDatetime64Timezone).
try:
    import pytz
    _has_pytz = True
except ImportError:
    _has_pytz = False
class _DeprecationTestCase(object):
    """Base class for deprecation tests.

    Subclasses set ``message`` (a regex matched against warning text) and
    ``warning_cls``, then call `assert_deprecated` / `assert_not_deprecated`
    from their test methods.  Nose-style ``setup``/``teardown`` wrap every
    test in a recording `warnings.catch_warnings` context.
    """

    # Just as warning: warnings uses re.match, so the start of this message
    # must match.
    message = ''
    warning_cls = DeprecationWarning

    def setup(self):
        # Record all warnings raised during the test into self.log.
        self.warn_ctx = warnings.catch_warnings(record=True)
        self.log = self.warn_ctx.__enter__()

        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
        # can give very confusing results because of
        # https://bugs.python.org/issue4180 and it is probably simplest to
        # try to keep the tests cleanly giving only the right warning type.
        # (While checking them set to "error" those are ignored anyway)
        # We still have them show up, because otherwise they would be raised
        warnings.filterwarnings("always", category=self.warning_cls)
        warnings.filterwarnings("always", message=self.message,
                                category=self.warning_cls)

    def teardown(self):
        # Restore the warning filter state captured in setup().
        self.warn_ctx.__exit__()

    def assert_deprecated(self, function, num=1, ignore_others=False,
                          function_fails=False,
                          exceptions=np._NoValue,
                          args=(), kwargs={}):
        """Test if DeprecationWarnings are given and raised.

        This first checks if the function when called gives `num`
        DeprecationWarnings, after that it tries to raise these
        DeprecationWarnings and compares them with `exceptions`.
        The exceptions can be different for cases where this code path
        is simply not anticipated and the exception is replaced.

        Parameters
        ----------
        function : callable
            The function to test
        num : int
            Number of DeprecationWarnings to expect. This should normally be 1.
        ignore_others : bool
            Whether warnings of the wrong type should be ignored (note that
            the message is not checked)
        function_fails : bool
            If the function would normally fail, setting this will check for
            warnings inside a try/except block.
        exceptions : Exception or tuple of Exceptions
            Exception to expect when turning the warnings into an error.
            The default checks for DeprecationWarnings. If exceptions is
            empty the function is expected to run successfully.
        args : tuple
            Arguments for `function`
        kwargs : dict
            Keyword arguments for `function`
        """
        # reset the log
        self.log[:] = []

        if exceptions is np._NoValue:
            exceptions = (self.warning_cls,)

        try:
            function(*args, **kwargs)
        # When function_fails is False this is `except ()`, i.e. catch
        # nothing and let any exception propagate.
        except (Exception if function_fails else tuple()):
            pass

        # just in case, clear the registry
        num_found = 0
        for warning in self.log:
            if warning.category is self.warning_cls:
                num_found += 1
            elif not ignore_others:
                raise AssertionError(
                    "expected %s but got: %s" %
                    (self.warning_cls.__name__, warning.category))
        if num is not None and num_found != num:
            msg = "%i warnings found but %i expected." % (len(self.log), num)
            lst = [str(w) for w in self.log]
            raise AssertionError("\n".join([msg] + lst))

        # Second phase: turn the matching warnings into errors and check the
        # raised exception type against `exceptions`.
        with warnings.catch_warnings():
            warnings.filterwarnings("error", message=self.message,
                                    category=self.warning_cls)
            try:
                function(*args, **kwargs)
                if exceptions != tuple():
                    raise AssertionError(
                        "No error raised during function call")
            except exceptions:
                if exceptions == tuple():
                    raise AssertionError(
                        "Error raised during function call")

    def assert_not_deprecated(self, function, args=(), kwargs={}):
        """Test that warnings are not raised.

        This is just a shorthand for:

        self.assert_deprecated(function, num=0, ignore_others=True,
                               exceptions=tuple(), args=args, kwargs=kwargs)
        """
        self.assert_deprecated(function, num=0, ignore_others=True,
                               exceptions=tuple(), args=args, kwargs=kwargs)
class _VisibleDeprecationTestCase(_DeprecationTestCase):
    # Same machinery as _DeprecationTestCase, but checks for warnings that
    # numpy emits as np.VisibleDeprecationWarning.
    warning_cls = np.VisibleDeprecationWarning
class TestNonTupleNDIndexDeprecation(object):
    """Indexing with a non-tuple sequence (e.g. ``a[[0, 1], [0, 1]]`` passed
    as a list) gives a FutureWarning instead of being treated as a tuple."""

    def test_basic(self):
        a = np.zeros((5, 5))
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
            assert_warns(FutureWarning, a.__getitem__, [slice(None)])

            warnings.filterwarnings('error')
            assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
            assert_raises(FutureWarning, a.__getitem__, [slice(None)])

            # a a[[0, 1]] always was advanced indexing, so no error/warning
            a[[0, 1]]
class TestComparisonDeprecations(_DeprecationTestCase):
    """This tests the deprecation, for non-element-wise comparison logic.
    This used to mean that when an error occurred during element-wise comparison
    (i.e. broadcasting) NotImplemented was returned, but also in the comparison
    itself, False was given instead of the error.

    Also test FutureWarning for the None comparison.
    """

    message = "elementwise.* comparison failed; .*"

    def test_normal_types(self):
        # == and != used to swallow broadcasting/comparison errors.
        for op in (operator.eq, operator.ne):
            # Broadcasting errors:
            self.assert_deprecated(op, args=(np.zeros(3), []))
            a = np.zeros(3, dtype='i,i')
            # (warning is issued a couple of times here)
            self.assert_deprecated(op, args=(a, a[:-1]), num=None)

            # Element comparison error (numpy array can't be compared).
            a = np.array([1, np.array([1,2,3])], dtype=object)
            b = np.array([1, np.array([1,2,3])], dtype=object)
            self.assert_deprecated(op, args=(a, b), num=None)

    def test_string(self):
        # For two string arrays, strings always raised the broadcasting error:
        a = np.array(['a', 'b'])
        b = np.array(['a', 'b', 'c'])
        assert_raises(ValueError, lambda x, y: x == y, a, b)

        # The empty list is not cast to string, and this used to pass due
        # to dtype mismatch; now (2018-06-21) it correctly leads to a
        # FutureWarning.
        assert_warns(FutureWarning, lambda: a == [])

    def test_void_dtype_equality_failures(self):
        class NotArray(object):
            def __array__(self):
                raise TypeError

            # Needed so Python 3 does not raise DeprecationWarning twice.
            def __ne__(self, other):
                return NotImplemented

        self.assert_deprecated(lambda: np.arange(2) == NotArray())
        self.assert_deprecated(lambda: np.arange(2) != NotArray())

        struct1 = np.zeros(2, dtype="i4,i4")
        struct2 = np.zeros(2, dtype="i4,i4,i4")

        assert_warns(FutureWarning, lambda: struct1 == 1)
        assert_warns(FutureWarning, lambda: struct1 == struct2)
        assert_warns(FutureWarning, lambda: struct1 != 1)
        assert_warns(FutureWarning, lambda: struct1 != struct2)

    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings() as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
class TestDatetime64Timezone(_DeprecationTestCase):
    """Parsing of datetime64 with timezones deprecated in 1.11.0, because
    datetime64 is now timezone naive rather than UTC only.

    It will be quite a while before we can remove this, because, at the very
    least, a lot of existing code uses the 'Z' modifier to avoid conversion
    from local time to UTC, even if otherwise it handles time in a timezone
    naive fashion.
    """

    def test_string(self):
        # Both an explicit offset and the 'Z' (UTC) suffix are deprecated.
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
        self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))

    @pytest.mark.skipif(not _has_pytz,
                        reason="The pytz module is not available.")
    def test_datetime(self):
        # A timezone-aware datetime.datetime object is deprecated as well.
        tz = pytz.timezone('US/Eastern')
        dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
        self.assert_deprecated(np.datetime64, args=(dt,))
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
    """View of non-C-contiguous arrays deprecated in 1.11.0.

    The deprecation will not be raised for arrays that are both C and F
    contiguous, as C contiguous is dominant. There are more such arrays
    with relaxed stride checking than without so the deprecation is not
    as visible with relaxed stride checking in force.
    """

    def test_fortran_contiguous(self):
        # A transposed (Fortran-ordered) array must warn for any view dtype.
        for view_dtype in (complex, np.int8):
            self.assert_deprecated(np.ones((2, 2)).T.view, args=(view_dtype,))
class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
    """Invalid arguments to the ORDER parameter in array.flatten() should not be
    allowed and should raise an error. However, in the interests of not breaking
    code that may inadvertently pass invalid arguments to this parameter, a
    DeprecationWarning will be issued instead for the time being to give developers
    time to refactor relevant code.
    """

    def test_flatten_array_non_string_arg(self):
        # A non-string order argument (here a float) is deprecated.
        x = np.zeros((3, 5))
        self.message = ("Non-string object detected for "
                        "the array ordering. Please pass "
                        "in 'C', 'F', 'A', or 'K' instead")
        self.assert_deprecated(x.flatten, args=(np.pi,))

    def test_flatten_array_invalid_string_arg(self):
        # Tests that a DeprecationWarning is raised
        # when a string of length greater than one
        # starting with "C", "F", "A", or "K" (case-
        # and unicode-insensitive) is passed in for
        # the ORDER parameter. Otherwise, a TypeError
        # will be raised!
        x = np.zeros((3, 5))
        self.message = ("Non length-one string passed "
                        "in for the array ordering. Please "
                        "pass in 'C', 'F', 'A', or 'K' instead")
        self.assert_deprecated(x.flatten, args=("FACK",))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
    """Assigning the 'data' attribute of an ndarray is unsafe as pointed
    out in gh-7093. Eventually, such assignment should NOT be allowed, but
    in the interests of maintaining backwards compatibility, only a Deprecation-
    Warning will be raised instead for the time being to give developers time to
    refactor relevant code.
    """

    def test_data_attr_assignment(self):
        a = np.arange(10)
        b = np.linspace(0, 1, 10)

        self.message = ("Assigning the 'data' attribute is an "
                        "inherently unsafe operation and will "
                        "be removed in the future.")
        # a.data = b.data must warn rather than silently rebind the buffer.
        self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
    """Argument to the num parameter in linspace that cannot be
    safely interpreted as an integer is deprecated in 1.12.0.

    Argument to the num parameter in linspace that cannot be
    safely interpreted as an integer should not be allowed.
    In the interest of not breaking code that passes
    an argument that could still be interpreted as an integer, a
    DeprecationWarning will be issued for the time being to give
    developers time to refactor relevant code.
    """

    def test_float_arg(self):
        # 2016-02-25, PR#7328
        # num=2.5 cannot be safely cast to an integer count.
        self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
    """
    If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
    represent the number in base 2 (positive) or 2's complement (negative) form,
    the function used to silently ignore the parameter and return a representation
    using the minimal number of bits needed for the form in question. Such behavior
    is now considered unsafe from a user perspective and will raise an error in the future.
    """

    def _check_insufficient_width(self, value):
        # Shared driver: width=2 is too small for `value` in either form.
        self.message = ("Insufficient bit width provided. This behavior "
                        "will raise an error in the future.")
        self.assert_deprecated(np.binary_repr, args=(value,),
                               kwargs={'width': 2})

    def test_insufficient_width_positive(self):
        # 10 needs four bits; width=2 must warn.
        self._check_insufficient_width(10)

    def test_insufficient_width_negative(self):
        # -5 needs four bits in two's complement; width=2 must warn.
        self._check_insufficient_width(-5)
class TestNumericStyleTypecodes(_DeprecationTestCase):
    """
    Deprecate the old numeric-style dtypes, which are especially
    confusing for complex types, e.g. Complex32 -> complex64. When the
    deprecation cycle is complete, the check for the strings should be
    removed from PyArray_DescrConverter in descriptor.c, and the
    deprecated keys should not be added as capitalized aliases in
    _add_aliases in numerictypes.py.
    """

    def test_all_dtypes(self):
        old_style_names = [
            'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
            'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
            'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
        ]
        # The capitalized string aliases only existed on Python 2.
        if sys.version_info[0] < 3:
            old_style_names += ['Unicode0', 'String0']

        for name in old_style_names:
            # np.dtype(name) warns; under "error" filters it raises TypeError.
            self.assert_deprecated(np.dtype, exceptions=(TypeError,),
                                   args=(name,))
class TestTestDeprecated(object):
    """Self-test for the _DeprecationTestCase helper itself."""

    def test_assert_deprecated(self):
        test_case_instance = _DeprecationTestCase()
        test_case_instance.setup()
        # A function that warns nothing must fail assert_deprecated.
        assert_raises(AssertionError,
                      test_case_instance.assert_deprecated,
                      lambda: None)

        def foo():
            warnings.warn("foo", category=DeprecationWarning, stacklevel=2)

        # A function that does warn must pass.
        test_case_instance.assert_deprecated(foo)
        test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
    """
    See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
    if used for division
    List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
    """

    def test_int_dtypes(self):
        # scramble types and do some mix and match testing
        deprecated_types = [
            'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
            'intp', 'int64', 'uint32', 'int16'
        ]
        # sys.py3kwarning only exists on Python 2; the version check
        # short-circuits so this attribute access is never hit on Python 3.
        if sys.version_info[0] < 3 and sys.py3kwarning:
            import operator as op
            dt2 = 'bool_'
            for dt1 in deprecated_types:
                a = np.array([1,2,3], dtype=dt1)
                b = np.array([1,2,3], dtype=dt2)
                # op.div (classic division) is deprecated under -3.
                self.assert_deprecated(op.div, args=(a,b))
                dt2 = dt1
class TestNonNumericConjugate(_DeprecationTestCase):
    """
    Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
    which conflicts with the error behavior of np.conjugate.
    """

    def test_conjugate(self):
        # Numeric dtypes: conjugate is well defined, no warning.
        for a in np.array(5), np.array(5j):
            self.assert_not_deprecated(a.conjugate)
        # Non-numeric dtypes (string, datetime, structured): deprecated no-op.
        for a in (np.array('s'), np.array('2016', 'M'),
                  np.array((1, 2), [('a', int), ('b', int)])):
            self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
    # 2017-05-03, 1.13.0
    def test_npy_char_deprecation(self):
        """Using the NPY_CHAR type number from C warns but still yields 'S1'."""
        from numpy.core._multiarray_tests import npy_char_deprecation
        self.assert_deprecated(npy_char_deprecation)
        assert_(npy_char_deprecation() == 'S1')
class TestPyArray_AS1D(_DeprecationTestCase):
    def test_npy_pyarrayas1d_deprecation(self):
        """The C macro PyArray_AS1D is removed: calling it must raise."""
        from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
class TestPyArray_AS2D(_DeprecationTestCase):
    def test_npy_pyarrayas2d_deprecation(self):
        """The C macro PyArray_AS2D is removed: calling it must raise."""
        from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
        assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class Test_UPDATEIFCOPY(_DeprecationTestCase):
    """
    v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
    WRITEBACKIFCOPY instead
    """

    def test_npy_updateifcopy_deprecation(self):
        from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
        arr = np.arange(9).reshape(3, 3)
        # A transposed view is non-contiguous, which forces the copy path
        # that used to set UPDATEIFCOPY.
        v = arr.T
        self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
class TestDatetimeEvent(_DeprecationTestCase):
    # 2017-08-11, 1.14.0
    def test_3_tuple(self):
        """Passing the old 'event' slot in a datetime metadata tuple warns."""
        for cls in (np.datetime64, np.timedelta64):
            # two valid uses - (unit, num) and (unit, num, den, None)
            self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
            self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))

            # trying to use the event argument, removed in 1.7.0, is deprecated
            # it used to be a uint8
            self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
    # 2017-09-25, 1.14.0
    """bool() of an empty array is ambiguous and therefore deprecated."""

    message = '.*truth value of an empty array is ambiguous.*'

    def test_1d(self):
        self.assert_deprecated(bool, args=(np.array([]),))

    def test_2d(self):
        # Every zero-sized 2-D shape must warn, regardless of which axis
        # (or both) is empty.
        for empty_shape in ((1, 0), (0, 1), (0, 0)):
            self.assert_deprecated(bool, args=(np.zeros(empty_shape),))
class TestBincount(_DeprecationTestCase):
    # 2017-06-01, 1.14.0
    def test_bincount_minlength(self):
        """minlength=None is deprecated; the default is 0."""
        self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestAlen(_DeprecationTestCase):
    # 2019-08-02, 1.18.0
    def test_alen(self):
        """np.alen is deprecated in favor of len()."""
        self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
class TestGeneratorSum(_DeprecationTestCase):
    # 2018-02-25, 1.15.0
    def test_generator_sum(self):
        """np.sum over a generator is deprecated (use the builtin sum)."""
        self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestSctypeNA(_VisibleDeprecationTestCase):
    # 2018-06-24, 1.16
    def test_sctypeNA(self):
        """Accessing the sctypeNA/typeNA alias tables is deprecated."""
        self.assert_deprecated(lambda: np.sctypeNA['?'])
        self.assert_deprecated(lambda: np.typeNA['?'])
        self.assert_deprecated(lambda: np.typeNA.get('?'))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
    # 2018-06-28, 1.16.0
    def test_positive_on_non_number(self):
        """Unary + on a non-numeric array is a deprecated no-op."""
        self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
    # 2017-10-19, 1.14
    def test_fromstring(self):
        """Binary-mode np.fromstring is deprecated (use np.frombuffer)."""
        self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
    # 2019-06-08, 1.17.0
    # Tests should be moved to real tests when deprecation is done.
    message = "string or file could not be read to its end"

    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
    def test_deprecate_unparsable_data_file(self, invalid_str):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)

        with tempfile.TemporaryFile(mode="w") as f:
            x.tofile(f, sep=',', format='%.2f')
            f.write(invalid_str)

            # Rewind before each read; fromfile consumes the stream.
            f.seek(0)
            self.assert_deprecated(lambda: np.fromfile(f, sep=","))
            f.seek(0)
            self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
            # Should not raise:
            with warnings.catch_warnings():
                warnings.simplefilter("error", DeprecationWarning)
                f.seek(0)
                res = np.fromfile(f, sep=",", count=4)
                assert_array_equal(res, x)

    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
    def test_deprecate_unparsable_string(self, invalid_str):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        x_str = "1.51,2,3.51,4{}".format(invalid_str)

        self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
        self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))

        # The C-level API can use not fixed size, but 0 terminated strings,
        # so test that as well:
        bytestr = x_str.encode("ascii")
        self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))

        with assert_warns(DeprecationWarning):
            # this is slightly strange, in that fromstring leaves data
            # potentially uninitialized (would be good to error when all is
            # read, but count is larger then actual data maybe).
            res = np.fromstring(x_str, sep=",", count=5)
            assert_array_equal(res[:-1], x)

        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            # Should not raise:
            res = np.fromstring(x_str, sep=",", count=4)
            assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
    # 2018-09-20, 1.16.0
    def test_get_numeric_ops(self):
        """PyArray_GetNumericOps/SetNumericOps (C API) are deprecated."""
        from numpy.core._multiarray_tests import getset_numericops
        self.assert_deprecated(getset_numericops, num=2)

        # empty kwargs prevents any state actually changing which would break
        # other tests.
        self.assert_deprecated(np.set_numeric_ops, kwargs={})
        assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
    warning_cls = FutureWarning

    # 2019-05-20, 1.17.0
    def test_shape_1_fields(self):
        # A structured field declared with shape 1 (instead of ()) warns.
        self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
    # 2019-05-26, 1.17.0
    """np.nonzero on a 0-d array is deprecated."""

    def test_zerod(self):
        # Both a falsy and a truthy 0-d array must warn.
        for scalar in (0, 1):
            self.assert_deprecated(lambda s=scalar: np.nonzero(np.array(s)))
|
MSeifert04/numpy
|
numpy/core/tests/test_deprecations.py
|
Python
|
bsd-3-clause
| 24,541
|
"""Deduplicate the lines of line.txt into workfile.txt.

Reads every line of line.txt, drops duplicates, and writes the unique
lines to workfile.txt.  As in the original, output order is arbitrary
(set iteration order) and lines differing only in trailing newline are
considered distinct.
"""
# Use context managers so both files are closed even on error; the
# original opened both files and never closed either handle.
with open('line.txt', 'r') as infile:
    unique_lines = set(infile.readlines())
with open('workfile.txt', 'w') as out:
    for line in unique_lines:
        out.write(line)
|
miller-tamil/research_on_email_marketting
|
web1_ads/Line_separator.py
|
Python
|
apache-2.0
| 144
|
# Package version string; bump on each release.
__version__ = "1.3.15"
|
F5Networks/f5-icontrol-rest-python
|
icontrol/__init__.py
|
Python
|
apache-2.0
| 23
|
import cherrypy
# 這是 CDBG30 類別的定義
class CDBG30(object):
# 各組利用 index 引導隨後的程式執行
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Landing page for the cdbg30 group: links to the cube-drawing and
        four-bar-assembly pages.

        NOTE: the HTML below is user-facing output (Traditional Chinese)
        and is therefore left exactly as-is.
        """
        outstring = '''
這是 2014CDB 協同專案下的 cdbg30 分組程式開發網頁, 以下為 W12 的任務執行內容.<br />
<!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
<a href="cube1">cdbg30 正方體參數繪圖</a>(尺寸變數 a, b, c)<br /><br />
<a href="fourbar1">四連桿組立</a><br /><br />
請確定下列連桿位於 V:/home/fourbar 目錄中, 且開啟空白 Creo 組立檔案.<br />
<a href="/static/fourbar.7z">fourbar.7z</a>(滑鼠右鍵存成 .7z 檔案)<br />
'''
        return outstring
'''
假如採用下列規畫
import programs.cdbg30 as cdbg30
root.cdbg30 = cdbg30.CDBG30()
則程式啟動後, 可以利用 /cdag30/cube1 呼叫函式執行
'''
    @cherrypy.expose
    def cube1(self, *args, **kwargs):
        """Serve a Pro/Web.Link page that redraws the current Creo cube model
        five times with growing a/b dimensions and reports each volume.

        Reference snippet (translated from the original Chinese comments):

        // To open a specific part file yourself:
        // if the third argument is false, the model is only loaded into the
        // session, not displayed; ret is the model-open return value
        var ret = document.pwl.pwlMdlOpen("axle_5.prt", "v:/tmp", false);
        if (!ret.Status) {
            alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
        }
        // Bind the running Pro/E session to the variable `session`
        var session = pfcGetProESession();
        // Open the part file in a window and display it
        var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("axle_5.prt"));
        var solid = session.GetModel("axle_5.prt",pfcCreate("pfcModelType").MDL_PART);

        NOTE: the HTML/JavaScript in `outstring` below is runtime output sent
        to the browser (including its Chinese comments) and is left as-is.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcParameterExamples.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcComponentFeatExamples.js"></script>
</head>
<body>
<script type="text/javascript">
var session = pfcGetProESession ();
// 以目前所開啟的檔案為 solid model
// for volume
var solid = session.CurrentModel;
var a, b, c, i, j, aValue, bValue, cValue, volume, count;
// 將模型檔中的 a 變數設為 javascript 中的 a 變數
a = solid.GetParam("a");
b = solid.GetParam("b");
c = solid.GetParam("c");
volume=0;
count=0;
try
{
for(i=0;i<5;i++)
{
myf = 100;
myn = myf + i*10;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
aValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
bValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
a.Value = aValue;
b.Value = bValue;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
//var newfile = document.pwl.pwlMdlSaveAs("filename.prt", "v:/tmp", "filename_5_"+count+".prt");
// 測試 stl 轉檔
//var stl_csys = "PRT_CSYS_DEF";
//var stl_instrs = new pfcCreate ("pfcSTLASCIIExportInstructions").Create(stl_csys);
//stl_instrs.SetQuality(10);
//solid.Export("v:/tmp/filename_5_"+count+".stl", stl_instrs);
// 結束測試轉檔
//if (!newfile.Status) {
//alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
//}
} // for loop
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
</body>
</html>
'''
        return outstring
@cherrypy.expose
def fourbar1(self, *args, **kwargs):
    """Serve an HTML page whose embedded JavaScript (Pro/E Web.Link API)
    assembles the four-bar-linkage parts link0-link3 into the currently
    open assembly and then writes the assembly's mass properties.

    NOTE(review): everything inside the triple-quoted string is runtime
    data sent to the browser, so it is reproduced verbatim here
    (including the Chinese comments inside the JavaScript).
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script type="text/javascript">
if (!pfcIsWindows())
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
var session = pfcGetProESession();
// 設定 config option
session.SetConfigOption("comp_placement_assumptions","no");
// 建立擺放零件的位置矩陣
var identityMatrix = pfcCreate ("pfcMatrix3D");
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set (x, y, 1.0);
else
identityMatrix.Set (x, y, 0.0);
}
var transf = pfcCreate ("pfcTransform3D").Create (identityMatrix);
// 取得目前的工作目錄
var currentDir = session.getCurrentDirectory();
// 以目前已開檔, 作為 model
var model = session.CurrentModel;
// 查驗有無 model, 或 model 類別是否為組立件
if (model == void null || model.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
var assembly = model;
/**----------------------------------------------- link0 -------------------------------------------------------------**/
//檔案目錄,建議將圖檔放置工作目錄下較方便使用
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link0.prt");
// 若 link1.prt 在 session 則直接取用
var componentModel = session.GetModelFromDescr (descr);
//若 link1.prt 不在 session 則從工作目錄中載入 session
var componentModel = session.RetrieveModel(descr);
//若 link1.prt 已經在 session 則放入組立檔中
if (componentModel != void null)
{
//注意這個 asmcomp 即為設定約束條件的本體
//asmcomp 為特徵物件,直接將零件, 以 transf 座標轉換放入組立檔案中
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
// 建立約束條件變數
var constrs = pfcCreate("pfcComponentConstraints");
//設定組立檔中的三個定位面, 注意內定名稱與 Pro/E WF 中的 ASM_D_FRONT 不同, 而是 ASM_FRONT
var asmDatums = new Array("ASM_FRONT", "ASM_TOP", "ASM_RIGHT");
//設定零件檔中的三個定位面, 名稱與 Pro/E WF 中相同
var compDatums = new Array("FRONT", "TOP", "RIGHT");
//建立 ids 變數, intseq 為 sequence of integers 為資料類別, 使用者可以經由整數索引擷取此資料類別的元件, 第一個索引為 0
var ids = pfcCreate("intseq");
//建立路徑變數
var path = pfcCreate ("MpfcAssembly").CreateComponentPath (assembly, ids);
//採用互動式設定相關的變數
var MpfcSelect = pfcCreate ("MpfcSelect");
//利用迴圈分別約束組立與零件檔中的三個定位平面
for (var i = 0; i < 3; i++)
{
//設定組立參考面
var asmItem = assembly.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, asmDatums [i]);
//若無對應的組立參考面, 則啟用互動式平面選擇表單 flag
if (asmItem == void null)
{
interactFlag = true;
continue;
}
//設定零件參考面
var compItem = componentModel.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums [i]);
// 若無對應的零件參考面, 則啟用互動式平面選擇表單 flag
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, path);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (false, false);
// 將互動選擇相關資料, 附加在程式約束變數之後
constrs.Append (constr);
}
// 設定組立約束條件
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link1 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link1.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
// 將所取得的零件, 以 transf 座標轉換矩陣所示方位放入組立檔
var asmcomp = assembly.AssembleComponent(componentModel, transf);
}
// 依零件特徵型別從 assembly 物件中, 建立一個組立特徵物件
var components = assembly.ListFeaturesByType(true, pfcCreate("pfcFeatureType").FEATTYPE_COMPONENT);
// 從組立特徵物件變數中 index 為 0 的特徵, 取出其特徵 id
var featID = components.Item(0).Id;
// 將所取到的特徵 id 附加在 sequence of integer 變數 ids 數列中
ids.Append(featID);
// 利用 ids 在 assembly 物件中, 建立對應的次組立特徵路徑
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath( assembly, ids);
// 從此特徵路徑中取出次組立物件, 設為 subassembly 物件
subassembly = subPath.Leaf;
var asmDatums = new Array("A_1", "TOP", "ASM_TOP");
var compDatums = new Array("A_1", "TOP", "TOP");
// 建立一個 ALIGN 加上 MATE 的組立關係陣列變數
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
// 建立一個以元件平面為基準的模型元件類別陣列變數
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
// 建立一個元件約束條件變數
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
// 從次組立件中, 以平面定位取出對應用的平面
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
// 若無法取得此定位平面, 則採互動模式選擇
interactFlag = true;
continue;
}
// 依照相同的平面定位方式, 從 componentModel 中取出要組立的對應平面
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate("MpfcSelect");
// 根據 subPath 選擇 asmItem, 也就是次組立元件的定位平面
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
// 從 根數列中選擇 compItem 定位用的平面
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
// 根據 ALIGN 建立約束關係變數
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
// 分別將 asmSel 與 compSel 套入約束關係中
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
// 除了所選擇的約束關係外, 其餘約束套用內定關係 (因為第二個變數為 false)
// 若第二個變數為 true, 則表示不要套用所選擇約束關係之外的內定約束
// 若要組立 closed-chain, 則最後一個連桿必須設為 true, 表示不強加內定的約束關係
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
// 將組立條件附加到 constrs 約束條件變數
constrs.Append(constr);
}
// 利用 constrs 約束關係設定 asmcomp 物件的組立約束
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link2 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link2.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID+1);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link3 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link3.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
var ids = pfcCreate ("intseq");
ids.Append(featID+2);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2");
var compDatums = new Array ("A_1");
for (var i = 0; i < 1; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP");
var compDatums = new Array ("A_2", "BOTTON");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
var session = pfcGetProESession ();
var solid = session.CurrentModel;
properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
    return outstring
@cherrypy.expose
def nutcracker(self, *args, **kwargs):
    """Serve an HTML page whose embedded JavaScript (Pro/E Web.Link API)
    assembles the nutcracker parts (fix, fixture, cracker, link, handle)
    into the currently open assembly and writes its mass properties.

    NOTE(review): the triple-quoted string below is runtime data sent to
    the browser and is reproduced verbatim (including the Chinese
    comments inside the JavaScript).
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script type="text/javascript">
if (!pfcIsWindows())
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
var session = pfcGetProESession();
// 設定 config option
session.SetConfigOption("comp_placement_assumptions","no");
// 建立擺放零件的位置矩陣
var identityMatrix = pfcCreate ("pfcMatrix3D");
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set (x, y, 1.0);
else
identityMatrix.Set (x, y, 0.0);
}
var transf = pfcCreate ("pfcTransform3D").Create (identityMatrix);
// 取得目前的工作目錄
var currentDir = session.getCurrentDirectory();
// 以目前已開檔, 作為 model
var model = session.CurrentModel;
// 查驗有無 model, 或 model 類別是否為組立件
if (model == void null || model.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
var assembly = model;
/**----------------------------------------------- fix -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("V:/home/nutcracker/fix.prt");
// 若 link1.prt 在 session 則直接取用
var componentModel = session.GetModelFromDescr (descr);
//若 link1.prt 不在 session 則從工作目錄中載入 session
var componentModel = session.RetrieveModel(descr);
//若 link1.prt 已經在 session 則放入組立檔中
if (componentModel != void null)
{
//注意這個 asmcomp 即為設定約束條件的本體
//asmcomp 為特徵物件,直接將零件, 以 transf 座標轉換放入組立檔案中
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
// 建立約束條件變數
var constrs = pfcCreate ("pfcComponentConstraints");
//設定組立檔中的三個定位面, 注意內定名稱與 Pro/E WF 中的 ASM_D_FRONT 不同, 而是 ASM_FRONT
var asmDatums = new Array ("ASM_FRONT", "ASM_TOP", "ASM_RIGHT");
//設定零件檔中的三個定位面, 名稱與 Pro/E WF 中相同
var compDatums = new Array ("FRONT", "TOP", "RIGHT");
//建立 ids 變數, intseq 為 sequence of integers 為資料類別, 使用者可以經由整數索引擷取此資料類別的元件, 第一個索引為 0
var ids = pfcCreate ("intseq");
//建立路徑變數
var path = pfcCreate ("MpfcAssembly").CreateComponentPath (assembly, ids);
//採用互動式設定相關的變數
var MpfcSelect = pfcCreate ("MpfcSelect");
//利用迴圈分別約束組立與零件檔中的三個定位平面
for (var i = 0; i < 3; i++)
{
//設定組立參考面
var asmItem = assembly.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, asmDatums [i]);
//若無對應的組立參考面, 則啟用互動式平面選擇表單 flag
if (asmItem == void null)
{
interactFlag = true;
continue;
}
//設定零件參考面
var compItem = componentModel.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums [i]);
//若無對應的零件參考面, 則啟用互動式平面選擇表單 flag
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, path);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (false, false);
//將互動選擇相關資料, 附加在程式約束變數之後
constrs.Append (constr);
}
//設定組立約束條件
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- fixture -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("V:/home/nutcracker/fixture.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
var featID = components.Item(0).Id;
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "RIGHT");
var compDatums = new Array ("A_3", "DTM1");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- cracker -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("V:/home/nutcracker/cracker.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_4");
var compDatums = new Array ("A_1");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 1; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("V:/home/nutcracker/link.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID+2);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2","DTM1");
var compDatums = new Array ("A_1","DTM1");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- handle -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("V:/home/nutcracker/handle.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_3","DTM3");
var compDatums = new Array ("A_1","DTM1");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
var ids = pfcCreate ("intseq");
ids.Append(featID+3);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "DTM1");
var compDatums = new Array ("A_2", "DTM1");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
var session = pfcGetProESession ();
var solid = session.CurrentModel;
properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
    return outstring
|
2014cdbg5/cdbg5
|
wsgi/programs/cdbg30/__init__.py
|
Python
|
gpl-2.0
| 32,107
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import netaddr
import os
import re
import shutil
import tempfile
import time
import urllib
from nova import block_device
from nova import compute
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import network
from nova import rpc
from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.compute import vm_states
from nova.image import s3
# Global flag registry shared across Nova services.
FLAGS = flags.FLAGS
# service_down_time is registered by the scheduler driver but read here.
flags.DECLARE('service_down_time', 'nova.scheduler.driver')
# Module-level logger for the EC2 cloud controller.
LOG = logging.getLogger("nova.api.cloud")
def _gen_key(context, user_id, key_name):
    """Create and store a new keypair for *user_id*.

    This is a module level method because it is slow and we need to
    defer it into a process pool.

    Raises KeyPairExists when the user already owns a key with this
    name; returns a dict with the generated private key and its
    fingerprint.
    """
    # NOTE(vish): look the name up first so we fail before doing the
    # expensive RSA generation.
    try:
        db.key_pair_get(context, user_id, key_name)
    except exception.NotFound:
        pass
    else:
        raise exception.KeyPairExists(key_name=key_name)
    private_key, public_key, fingerprint = crypto.generate_key_pair()
    db.key_pair_create(context, {'user_id': user_id,
                                 'name': key_name,
                                 'public_key': public_key,
                                 'fingerprint': fingerprint})
    return {'private_key': private_key, 'fingerprint': fingerprint}
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending | running | shutting-down | terminated | stopping | stopped
# NOTE(review): 'migrate', 'resize', 'pause', 'suspend' and 'rescue'
# below are not part of the EC2 enumeration above; they are passed
# through as-is for Nova-specific states.
_STATE_DESCRIPTION_MAP = {
    None: 'pending',
    vm_states.ACTIVE: 'running',
    vm_states.BUILDING: 'pending',
    vm_states.REBUILDING: 'pending',
    vm_states.DELETED: 'terminated',
    vm_states.SOFT_DELETE: 'terminated',
    vm_states.STOPPED: 'stopped',
    vm_states.MIGRATING: 'migrate',
    vm_states.RESIZING: 'resize',
    vm_states.PAUSED: 'pause',
    vm_states.SUSPENDED: 'suspend',
    vm_states.RESCUED: 'rescue',
}
def state_description_from_vm_state(vm_state):
    """Translate an internal vm state into the EC2 status string.

    States with no EC2 equivalent are returned unchanged.
    """
    try:
        return _STATE_DESCRIPTION_MAP[vm_state]
    except KeyError:
        return vm_state
# TODO(yamahata): hypervisor dependent default device name
_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1'
# Fallback device mapping used when an instance has no explicit
# root_device_name recorded.
_DEFAULT_MAPPINGS = {'ami': 'sda1',
                     'ephemeral0': 'sda2',
                     'root': _DEFAULT_ROOT_DEVICE_NAME,
                     'swap': 'sda3'}
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
BlockDevicedMapping.<N>.DeviceName
BlockDevicedMapping.<N>.Ebs.SnapshotId
BlockDevicedMapping.<N>.Ebs.VolumeSize
BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
BlockDevicedMapping.<N>.Ebs.NoDevice
BlockDevicedMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
id = ec2utils.ec2_id_to_id(ec2_id)
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = id
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = id
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
def _properties_get_mappings(properties):
    """Return the image's device mappings, each prefixed with '/dev/'."""
    raw_mappings = properties.get('mappings', [])
    return block_device.mappings_prepend_dev(raw_mappings)
def _format_block_device_mapping(bdm):
"""Contruct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
def _format_mappings(properties, result):
    """Fill result['blockDeviceMapping'] from image *properties*.

    Derives swap/ephemeral entries from the image 'mappings' list, then
    lets explicit block_device_mapping entries override any derived
    entry for the same device.  Entries flagged noDevice are dropped.
    """
    mappings = []
    for m in _properties_get_mappings(properties):
        if block_device.is_swap_or_ephemeral(m['virtual']):
            mappings.append({'virtualName': m['virtual'],
                             'deviceName': m['device']})
    # NOTE(yamahata): explicit block_device_mapping wins over the
    # derived entries above.
    for raw in properties.get('block_device_mapping', []):
        formatted = _format_block_device_mapping(raw)
        for idx, existing in enumerate(mappings):
            if existing['deviceName'] == formatted['deviceName']:
                del mappings[idx]
                break
        mappings.append(formatted)
    # NOTE(yamahata): trim ebs.no_device == true.  Is this necessary?
    mappings = [m for m in mappings if not m.get('noDevice', False)]
    if mappings:
        result['blockDeviceMapping'] = mappings
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
    """Wire up the service facades this controller dispatches to."""
    self.image_service = s3.S3ImageService()
    self.network_api = network.API()
    self.volume_api = volume.API()
    # The compute API reuses the same network/volume facades so all
    # calls share one view of those services.
    self.compute_api = compute.API(
        network_api=self.network_api,
        volume_api=self.volume_api)
    # Create key folders / root CA before serving any request.
    self.setup()
def __str__(self):
    """Fixed human-readable name used in log messages."""
    return 'CloudController'
def setup(self):
    """Ensure the keychains and folders exist."""
    # FIXME(ja): this should be moved to a nova-manage command,
    # if not setup throw exceptions instead of running
    # Create keys folder, if it doesn't exist
    if not os.path.exists(FLAGS.keys_path):
        os.makedirs(FLAGS.keys_path)
    # Gen root CA, if we don't have one
    root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
    if not os.path.exists(root_ca_path):
        genrootca_sh_path = os.path.join(os.path.dirname(__file__),
                                         os.path.pardir,
                                         os.path.pardir,
                                         'CA',
                                         'genrootca.sh')
        # Remember the cwd: the script must run from inside ca_path,
        # and we restore the original directory afterwards.
        start = os.getcwd()
        if not os.path.exists(FLAGS.ca_path):
            os.makedirs(FLAGS.ca_path)
        os.chdir(FLAGS.ca_path)
        # TODO(vish): Do this with M2Crypto instead
        utils.runthis(_("Generating root CA: %s"), "sh", genrootca_sh_path)
        os.chdir(start)
def _get_mpi_data(self, context, project_id):
result = {}
search_opts = {'project_id': project_id}
for instance in self.compute_api.get_all(context,
search_opts=search_opts):
if instance['fixed_ips']:
line = '%s slots=%d' % (instance['fixed_ips'][0]['address'],
instance['vcpus'])
key = str(instance['key_name'])
if key in result:
result[key].append(line)
else:
result[key] = [line]
return result
def _get_availability_zone_by_host(self, context, host):
    """Return the availability zone of the first service on *host*.

    Falls back to 'unknown zone' when no service is registered there.
    """
    services = db.service_get_all_by_host(context.elevated(), host)
    if not services:
        return 'unknown zone'
    return services[0]['availability_zone']
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
    def _format_instance_mapping(self, ctxt, instance_ref):
        """Build the EC2 metadata block-device-mapping for an instance.

        Maps EC2 aliases ('ami', 'root', 'ephemeralN', 'swap', 'ebsN')
        to device names. Falls back to _DEFAULT_MAPPINGS when the
        instance has no root_device_name.
        """
        root_device_name = instance_ref['root_device_name']
        if root_device_name is None:
            return _DEFAULT_MAPPINGS
        mappings = {}
        mappings['ami'] = block_device.strip_dev(root_device_name)
        mappings['root'] = root_device_name
        default_local_device = instance_ref.get('default_local_device')
        if default_local_device:
            mappings['ephemeral0'] = default_local_device
        default_swap_device = instance_ref.get('default_swap_device')
        if default_swap_device:
            mappings['swap'] = default_swap_device
        ebs_devices = []
        # 'ephemeralN', 'swap' and ebs
        for bdm in db.block_device_mapping_get_all_by_instance(
            ctxt, instance_ref['id']):
            if bdm['no_device']:
                continue
            # ebs volume case: collected now, numbered after the loop
            if (bdm['volume_id'] or bdm['snapshot_id']):
                ebs_devices.append(bdm['device_name'])
                continue
            virtual_name = bdm['virtual_name']
            if not virtual_name:
                continue
            if block_device.is_swap_or_ephemeral(virtual_name):
                mappings[virtual_name] = bdm['device_name']
        # NOTE(yamahata): I'm not sure how ebs device should be numbered.
        #                 Right now sort by device name for deterministic
        #                 result.
        if ebs_devices:
            nebs = 0
            ebs_devices.sort()
            for ebs in ebs_devices:
                mappings['ebs%d' % nebs] = ebs
                nebs += 1
        return mappings
    def get_metadata(self, address):
        """Return the EC2 metadata document for the instance owning the
        fixed IP *address*, or None when no instance matches.

        Uses an admin context: the metadata service authenticates
        callers by source address only.
        """
        ctxt = context.get_admin_context()
        search_opts = {'fixed_ip': address}
        try:
            instance_ref = self.compute_api.get_all(ctxt,
                                                    search_opts=search_opts)
        except exception.NotFound:
            instance_ref = None
        if not instance_ref:
            return None
        # This ensures that all attributes of the instance
        # are populated.
        instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])
        mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
        hostname = instance_ref['hostname']
        host = instance_ref['host']
        availability_zone = self._get_availability_zone_by_host(ctxt, host)
        floating_ip = db.instance_get_floating_address(ctxt,
                                                       instance_ref['id'])
        ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        image_ec2_id = self.image_ec2_id(instance_ref['image_ref'])
        security_groups = db.security_group_get_by_instance(ctxt,
                                                            instance_ref['id'])
        security_groups = [x['name'] for x in security_groups]
        mappings = self._format_instance_mapping(ctxt, instance_ref)
        data = {
            'user-data': self._format_user_data(instance_ref),
            'meta-data': {
                'ami-id': image_ec2_id,
                'ami-launch-index': instance_ref['launch_index'],
                'ami-manifest-path': 'FIXME',
                'block-device-mapping': mappings,
                'hostname': hostname,
                'instance-action': 'none',
                'instance-id': ec2_id,
                'instance-type': instance_ref['instance_type']['name'],
                'local-hostname': hostname,
                'local-ipv4': address,
                'placement': {'availability-zone': availability_zone},
                'public-hostname': hostname,
                'public-ipv4': floating_ip or '',
                'reservation-id': instance_ref['reservation_id'],
                'security-groups': security_groups,
                'mpi': mpi}}
        # public-keys should be in meta-data only if user specified one
        if instance_ref['key_name']:
            data['meta-data']['public-keys'] = {
                '0': {'_name': instance_ref['key_name'],
                      'openssh-key': instance_ref['key_data']}}
        for image_type in ['kernel', 'ramdisk']:
            if instance_ref.get('%s_id' % image_type):
                ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type],
                                           self._image_type(image_type))
                data['meta-data']['%s-id' % image_type] = ec2_id
        if False:  # TODO(vish): store ancestor ids
            data['ancestor-ami-ids'] = []
        if False:  # TODO(vish): store product codes
            data['product-codes'] = []
        return data
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
enabled_services = db.service_get_all(ctxt, False)
disabled_services = db.service_get_all(ctxt, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
if not zone in available_zones:
available_zones.append(zone)
not_available_zones = []
for zone in [service.availability_zone for service in disabled_services
if not service['availability_zone'] in available_zones]:
if not zone in not_available_zones:
not_available_zones.append(zone)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
services = db.service_get_all(context, False)
now = utils.utcnow()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
hosts.append(host)
for host in hosts:
rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
'zoneState': ''})
hsvcs = [service for service in services \
if service['host'] == host]
for svc in hsvcs:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= FLAGS.service_down_time)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
rv['availabilityZoneInfo'].append({
'zoneName': '| |- %s' % svc['binary'],
'zoneState': '%s %s %s' % (active, art,
svc['updated_at'])})
return rv
def describe_regions(self, context, region_name=None, **kwargs):
if FLAGS.region_list:
regions = []
for region in FLAGS.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme,
host,
FLAGS.ec2_port,
FLAGS.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
FLAGS.ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
snapshots = [self._format_snapshot(context, s) for s in snapshots]
return {'snapshotSet': snapshots}
def _format_snapshot(self, context, snapshot):
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = snapshot['status']
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
s['display_name'] = snapshot['display_name']
s['display_description'] = snapshot['display_description']
return s
def create_snapshot(self, context, volume_id, **kwargs):
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_id_to_id(volume_id)
snapshot = self.volume_api.create_snapshot(
context,
volume_id=volume_id,
name=kwargs.get('display_name'),
description=kwargs.get('display_description'))
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_id_to_id(snapshot_id)
self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
if not key_name is None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
if context.is_admin or \
not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
data = _gen_key(context, context.user_id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
'keyMaterial': data['private_key']}
# TODO(vish): when context is no longer an object, pass it here
def import_public_key(self, context, key_name, public_key,
fingerprint=None):
LOG.audit(_("Import key %s"), key_name, context=context)
key = {}
key['user_id'] = context.user_id
key['name'] = key_name
key['public_key'] = public_key
if fingerprint is None:
tmpdir = tempfile.mkdtemp()
pubfile = os.path.join(tmpdir, 'temp.pub')
fh = open(pubfile, 'w')
fh.write(public_key)
fh.close()
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
'%s' % (pubfile))
fingerprint = out.split(' ')[1]
shutil.rmtree(tmpdir)
key['fingerprint'] = fingerprint
db.key_pair_create(context, key)
return True
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
db.key_pair_destroy(context, context.user_id, key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
self.compute_api.ensure_default_security_group(context)
if group_name or group_id:
groups = []
if group_name:
for name in group_name:
group = db.security_group_get_by_name(context,
context.project_id,
name)
groups.append(group)
if group_id:
for gid in group_id:
group = db.security_group_get(context, gid)
groups.append(group)
elif context.is_admin:
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project_id)
groups = [self._format_security_group(context, g) for g in groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group.description
g['groupName'] = group.name
g['ownerId'] = group.project_id
g['ipPermissions'] = []
for rule in group.rules:
r = {}
r['groups'] = []
r['ipRanges'] = []
if rule.group_id:
source_group = db.security_group_get(context, rule.group_id)
r['groups'] += [{'groupName': source_group.name,
'userId': source_group.project_id}]
if rule.protocol:
r['ipProtocol'] = rule.protocol
r['fromPort'] = rule.from_port
r['toPort'] = rule.to_port
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
('tcp', 1, 65535),
('udp', 1, 65536)):
r['ipProtocol'] = protocol
r['fromPort'] = min_port
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
r['ipProtocol'] = rule.protocol
r['fromPort'] = rule.from_port
r['toPort'] = rule.to_port
r['ipRanges'] += [{'cidrIp': rule.cidr}]
g['ipPermissions'] += [r]
return g
def _rule_args_to_dict(self, context, kwargs):
rules = []
if not 'groups' in kwargs and not 'ip_ranges' in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        """Normalize one rule's arguments into a DB values dict.

        The source is either another security group (-> 'group_id') or a
        CIDR (default 0.0.0.0/0). Returns None for a CIDR-based rule that
        lacks protocol/ports; raises SecurityGroupNotFound,
        InvalidIpProtocol or InvalidPortRange on bad input.
        """
        values = {}
        if source_security_group_name:
            source_project_id = self._get_source_project_id(context,
                source_security_group_owner_id)
            source_security_group = \
                    db.security_group_get_by_name(context.elevated(),
                                                  source_project_id,
                                                  source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            values['group_id'] = source_security_group['id']
        elif cidr_ip:
            # If this fails, it throws an exception. This is what we want.
            # NOTE(review): urllib.unquote is Python 2 only; the decode()
            # yields a unicode CIDR string there.
            cidr_ip = urllib.unquote(cidr_ip).decode()
            netaddr.IPNetwork(cidr_ip)
            values['cidr'] = cidr_ip
        else:
            values['cidr'] = '0.0.0.0/0'
        if ip_protocol and from_port and to_port:
            from_port = int(from_port)
            to_port = int(to_port)
            ip_protocol = str(ip_protocol)
            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)
            # -1 is allowed as the icmp wildcard port.
            if ((min(from_port, to_port) < -1) or
                (max(from_port, to_port) > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                                                 to_port=to_port)
            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None
        return values
def _security_group_rule_exists(self, security_group, values):
"""Indicates whether the specified rule values are already
defined in the given security group.
"""
for rule in security_group.rules:
if 'group_id' in values:
if rule['group_id'] == values['group_id']:
return rule['id']
else:
is_duplicate = True
for key in ('cidr', 'from_port', 'to_port', 'protocol'):
if rule[key] != values[key]:
is_duplicate = False
break
if is_duplicate:
return rule['id']
return False
    def revoke_security_group_ingress(self, context, group_name=None,
                                      group_id=None, **kwargs):
        """Remove matching ingress rules from a security group.

        The group is addressed by name or id. Raises ApiError when
        neither is given, when the rule arguments are invalid, or when
        no matching rule was found.
        """
        if not group_name and not group_id:
            err = "Not enough parameters, need group_name or group_id"
            raise exception.ApiError(_(err))
        self.compute_api.ensure_default_security_group(context)
        notfound = exception.SecurityGroupNotFound
        if group_name:
            security_group = db.security_group_get_by_name(context,
                                                           context.project_id,
                                                           group_name)
            if not security_group:
                raise notfound(security_group_id=group_name)
        if group_id:
            security_group = db.security_group_get(context, group_id)
            if not security_group:
                raise notfound(security_group_id=group_id)
        msg = "Revoke security group ingress %s"
        LOG.audit(_(msg), security_group['name'], context=context)
        prevalues = []
        try:
            prevalues = kwargs['ip_permissions']
        except KeyError:
            prevalues.append(kwargs)
        rule_id = None
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            if not rulesvalues:
                err = "%s Not enough parameters to build a valid rule"
                raise exception.ApiError(_(err % rulesvalues))
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                rule_id = self._security_group_rule_exists(security_group,
                                                           values_for_rule)
                if rule_id:
                    db.security_group_rule_destroy(context, rule_id)
        # NOTE(review): rule_id carries over from the last loop iteration
        # only — the refresh/success path below fires iff the final rule
        # examined matched (historical behavior, kept as-is).
        if rule_id:
            # NOTE(vish): we removed a rule, so refresh
            self.compute_api.trigger_security_group_rules_refresh(
                    context,
                    security_group_id=security_group['id'])
            return True
        raise exception.ApiError(_("No rule for the specified parameters."))
    # TODO(soren): This has only been tested with Boto as the client.
    #              Unfortunately, it seems Boto is using an old API
    #              for these operations, so support for newer API versions
    #              is sketchy.
    def authorize_security_group_ingress(self, context, group_name=None,
                                         group_id=None, **kwargs):
        """Add ingress rules to a security group addressed by name or id.

        Validates every rule first (raising on duplicates or invalid
        parameters), then creates them all and triggers a rules refresh.
        """
        if not group_name and not group_id:
            err = "Not enough parameters, need group_name or group_id"
            raise exception.ApiError(_(err))
        self.compute_api.ensure_default_security_group(context)
        notfound = exception.SecurityGroupNotFound
        if group_name:
            security_group = db.security_group_get_by_name(context,
                                                           context.project_id,
                                                           group_name)
            if not security_group:
                raise notfound(security_group_id=group_name)
        if group_id:
            security_group = db.security_group_get(context, group_id)
            if not security_group:
                raise notfound(security_group_id=group_id)
        msg = "Authorize security group ingress %s"
        LOG.audit(_(msg), security_group['name'], context=context)
        prevalues = []
        try:
            prevalues = kwargs['ip_permissions']
        except KeyError:
            prevalues.append(kwargs)
        postvalues = []
        # Phase 1: validate and collect every rule before touching the DB.
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            if not rulesvalues:
                err = "%s Not enough parameters to build a valid rule"
                raise exception.ApiError(_(err % rulesvalues))
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                if self._security_group_rule_exists(security_group,
                                                    values_for_rule):
                    err = '%s - This rule already exists in group'
                    raise exception.ApiError(_(err) % values_for_rule)
                postvalues.append(values_for_rule)
        # Phase 2: create the validated rules.
        for values_for_rule in postvalues:
            # NOTE(review): the created row is not used afterwards.
            security_group_rule = db.security_group_rule_create(
                    context,
                    values_for_rule)
        if postvalues:
            self.compute_api.trigger_security_group_rules_refresh(
                    context,
                    security_group_id=security_group['id'])
            return True
        raise exception.ApiError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
def create_security_group(self, context, group_name, group_description):
if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
err = _("Value (%s) for parameter GroupName is invalid."
" Content limited to Alphanumeric characters, "
"spaces, dashes, and underscores.") % group_name
# err not that of master ec2 implementation, as they fail to raise.
raise exception.InvalidParameterValue(err=err)
if len(str(group_name)) > 255:
err = _("Value (%s) for parameter GroupName is invalid."
" Length exceeds maximum of 255.") % group_name
raise exception.InvalidParameterValue(err=err)
LOG.audit(_("Create Security Group %s"), group_name, context=context)
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = "Not enough parameters, need group_name or group_id"
raise exception.ApiError(_(err))
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
if not security_group:
raise notfound(security_group_id=group_name)
elif group_id:
security_group = db.security_group_get(context, group_id)
if not security_group:
raise notfound(security_group_id=group_id)
LOG.audit(_("Delete security group %s"), group_name, context=context)
db.security_group_destroy(context, security_group.id)
return True
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if type(instance_id) == list:
ec2_id = instance_id[0]
else:
ec2_id = instance_id
instance_id = ec2utils.ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = utils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
instance_id = ec2utils.ec2_id_to_id(ec2_id)
return self.compute_api.get_ajax_console(context,
instance_id=instance_id)
def get_vnc_console(self, context, instance_id, **kwargs):
"""Returns vnc browser url. Used by OS dashboard."""
ec2_id = instance_id
instance_id = ec2utils.ec2_id_to_id(ec2_id)
return self.compute_api.get_vnc_console(context,
instance_id=instance_id)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
volume = self.volume_api.get(context, volume_id=internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
instance_ec2_id = None
instance_data = None
if volume.get('instance', None):
instance_id = volume['instance']['id']
instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if context.is_admin:
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
volume['project_id'],
volume['host'],
instance_data,
volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
if volume.get('snapshot_id') != None:
v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
else:
v['snapshotId'] = None
v['display_name'] = volume['display_name']
v['display_description'] = volume['display_description']
return v
def create_volume(self, context, **kwargs):
size = kwargs.get('size')
if kwargs.get('snapshot_id') != None:
snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id'])
LOG.audit(_("Create volume from snapshot %s"), snapshot_id,
context=context)
else:
snapshot_id = None
LOG.audit(_("Create volume of %s GB"), size, context=context)
volume = self.volume_api.create(
context,
size=size,
snapshot_id=snapshot_id,
name=kwargs.get('display_name'),
description=kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
volume_id = ec2utils.ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
volume_id = ec2utils.ec2_id_to_id(volume_id)
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
self.volume_api.update(context,
volume_id=volume_id,
fields=changes)
return True
    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        """Attach an EC2 volume to an instance at *device* and report the
        resulting attachment state.

        NOTE: the audit message interpolates via locals(), so the
        volume_id/instance_id/device names must not be renamed.
        """
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
                " at %(device)s") % locals()
        LOG.audit(msg, context=context)
        self.compute_api.attach_volume(context,
                                       instance_id=instance_id,
                                       volume_id=volume_id,
                                       device=device)
        volume = self.volume_api.get(context, volume_id=volume_id)
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_id(instance_id),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def detach_volume(self, context, volume_id, **kwargs):
volume_id = ec2utils.ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id=volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_id(instance['id']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, instance_ref, result, key):
kernel_id = instance_ref['kernel_id']
if kernel_id is None:
return
result[key] = self.image_ec2_id(instance_ref['kernel_id'], 'aki')
def _format_ramdisk_id(self, instance_ref, result, key):
ramdisk_id = instance_ref['ramdisk_id']
if ramdisk_id is None:
return
result[key] = self.image_ec2_id(instance_ref['ramdisk_id'], 'ari')
@staticmethod
def _format_user_data(instance_ref):
return base64.b64decode(instance_ref['user_data'])
    def describe_instance_attribute(self, context, instance_id, attribute,
                                    **kwargs):
        """EC2 DescribeInstanceAttribute: report a single attribute of an
        instance via a dispatch table of per-attribute formatters.

        Raises ApiError for unknown or unsupported attributes.
        """
        def _unsupported_attribute(instance, result):
            raise exception.ApiError(_('attribute not supported: %s') %
                                     attribute)

        def _format_attr_block_device_mapping(instance, result):
            tmp = {}
            self._format_instance_root_device_name(instance, tmp)
            self._format_instance_bdm(context, instance_id,
                                      tmp['rootDeviceName'], result)

        def _format_attr_disable_api_termination(instance, result):
            _unsupported_attribute(instance, result)

        def _format_attr_group_set(instance, result):
            CloudController._format_group_set(instance, result)

        def _format_attr_instance_initiated_shutdown_behavior(instance,
                                                               result):
            # Only stopped/deleted states map onto EC2 shutdown behavior.
            vm_state = instance['vm_state']
            state_to_value = {
                vm_states.STOPPED: 'stopped',
                vm_states.DELETED: 'terminated',
            }
            value = state_to_value.get(vm_state)
            if value:
                result['instanceInitiatedShutdownBehavior'] = value

        def _format_attr_instance_type(instance, result):
            self._format_instance_type(instance, result)

        def _format_attr_kernel(instance, result):
            self._format_kernel_id(instance, result, 'kernel')

        def _format_attr_ramdisk(instance, result):
            self._format_ramdisk_id(instance, result, 'ramdisk')

        def _format_attr_root_device_name(instance, result):
            self._format_instance_root_device_name(instance, result)

        def _format_attr_source_dest_check(instance, result):
            _unsupported_attribute(instance, result)

        def _format_attr_user_data(instance, result):
            result['userData'] = self._format_user_data(instance)

        attribute_formatter = {
            'blockDeviceMapping': _format_attr_block_device_mapping,
            'disableApiTermination': _format_attr_disable_api_termination,
            'groupSet': _format_attr_group_set,
            'instanceInitiatedShutdownBehavior':
            _format_attr_instance_initiated_shutdown_behavior,
            'instanceType': _format_attr_instance_type,
            'kernel': _format_attr_kernel,
            'ramdisk': _format_attr_ramdisk,
            'rootDeviceName': _format_attr_root_device_name,
            'sourceDestCheck': _format_attr_source_dest_check,
            'userData': _format_attr_user_data,
        }
        fn = attribute_formatter.get(attribute)
        if fn is None:
            raise exception.ApiError(
                _('attribute not supported: %s') % attribute)
        ec2_instance_id = instance_id
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance = self.compute_api.get(context, instance_id)
        result = {'instance_id': ec2_instance_id}
        fn(instance, result)
        return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
return self._format_describe_instances(context,
instance_id=instance_id)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
return self._format_describe_instances(context,
instance_id=instance_id, use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
    def _format_instance_bdm(self, context, instance_id, root_device_name,
                             result):
        """Format InstanceBlockDeviceMappingResponseItemType.

        Adds a 'blockDeviceMapping' entry to *result* for every attached
        EBS volume, and sets 'rootDeviceType' to 'ebs' when the root
        device itself is volume/snapshot-backed.
        """
        root_device_type = 'instance-store'
        mapping = []
        for bdm in db.block_device_mapping_get_all_by_instance(context,
                                                               instance_id):
            volume_id = bdm['volume_id']
            if (volume_id is None or bdm['no_device']):
                continue
            if (bdm['device_name'] == root_device_name and
                (bdm['snapshot_id'] or bdm['volume_id'])):
                assert not bdm['virtual_name']
                root_device_type = 'ebs'
            vol = self.volume_api.get(context, volume_id=volume_id)
            LOG.debug(_("vol = %s\n"), vol)
            # TODO(yamahata): volume attach time
            ebs = {'volumeId': volume_id,
                   'deleteOnTermination': bdm['delete_on_termination'],
                   'attachTime': vol['attach_time'] or '-',
                   'status': vol['status'], }
            res = {'deviceName': bdm['device_name'],
                   'ebs': ebs, }
            mapping.append(res)
        if mapping:
            result['blockDeviceMapping'] = mapping
        result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
_DEFAULT_ROOT_DEVICE_NAME)
@staticmethod
def _format_instance_type(instance, result):
if instance['instance_type']:
result['instanceType'] = instance['instance_type'].get('name')
else:
result['instanceType'] = None
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
    def _format_instances(self, context, instance_id=None, use_v6=False,
                          **search_opts):
        """Fetch and format instances grouped into EC2 reservations.

        Returns a list of reservation dicts, each carrying an
        'instancesSet'. Non-admin callers never see the VPN instance.
        """
        # TODO(termie): this method is poorly named as its name does not imply
        #               that it will be making a variety of database calls
        #               rather than simply formatting a bunch of instances that
        #               were handed to it
        reservations = {}
        # NOTE(vish): instance_id is an optional list of ids to filter by
        if instance_id:
            instances = []
            for ec2_id in instance_id:
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                try:
                    instance = self.compute_api.get(context, internal_id)
                except exception.NotFound:
                    continue
                instances.append(instance)
        else:
            try:
                # always filter out deleted instances
                search_opts['deleted'] = False
                instances = self.compute_api.get_all(context,
                                                     search_opts=search_opts)
            except exception.NotFound:
                instances = []
        for instance in instances:
            if not context.is_admin:
                if instance['image_ref'] == str(FLAGS.vpn_image_id):
                    continue
            i = {}
            instance_id = instance['id']
            ec2_id = ec2utils.id_to_ec2_id(instance_id)
            i['instanceId'] = ec2_id
            i['imageId'] = self.image_ec2_id(instance['image_ref'])
            self._format_kernel_id(instance, i, 'kernelId')
            self._format_ramdisk_id(instance, i, 'ramdiskId')
            i['instanceState'] = {
                'code': instance['power_state'],
                'name': state_description_from_vm_state(instance['vm_state'])}
            fixed_addr = None
            floating_addr = None
            if instance['fixed_ips']:
                fixed = instance['fixed_ips'][0]
                fixed_addr = fixed['address']
                if fixed['floating_ips']:
                    floating_addr = fixed['floating_ips'][0]['address']
                if fixed['network'] and use_v6:
                    i['dnsNameV6'] = ipv6.to_global(
                        fixed['network']['cidr_v6'],
                        fixed['virtual_interface']['address'],
                        instance['project_id'])
            i['privateDnsName'] = fixed_addr
            i['privateIpAddress'] = fixed_addr
            i['publicDnsName'] = floating_addr
            i['ipAddress'] = floating_addr or fixed_addr
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']
            if context.is_admin:
                # Admins get the owning project and host appended.
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                    instance['project_id'],
                    instance['host'])
            i['productCodesSet'] = utils.convert_to_list_dict([],
                                                              'product_codes')
            self._format_instance_type(instance, i)
            i['launchTime'] = instance['created_at']
            i['amiLaunchIndex'] = instance['launch_index']
            i['displayName'] = instance['display_name']
            i['displayDescription'] = instance['display_description']
            self._format_instance_root_device_name(instance, i)
            self._format_instance_bdm(context, instance_id,
                                      i['rootDeviceName'], i)
            host = instance['host']
            zone = self._get_availability_zone_by_host(context, host)
            i['placement'] = {'availabilityZone': zone}
            # Group formatted instances under their reservation.
            if instance['reservation_id'] not in reservations:
                r = {}
                r['reservationId'] = instance['reservation_id']
                r['ownerId'] = instance['project_id']
                self._format_group_set(instance, r)
                r['instancesSet'] = []
                reservations[instance['reservation_id']] = r
            reservations[instance['reservation_id']]['instancesSet'].append(i)
        return list(reservations.values())
    def describe_addresses(self, context, **kwargs):
        """Describe floating IPs (EC2 DescribeAddresses); kwargs are ignored."""
        return self.format_addresses(context)
def format_addresses(self, context):
addresses = []
if context.is_admin:
iterator = db.floating_ip_get_all(context)
else:
iterator = db.floating_ip_get_all_by_project(context,
context.project_id)
for floating_ip_ref in iterator:
if floating_ip_ref['project_id'] is None:
continue
address = floating_ip_ref['address']
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
instance_id = floating_ip_ref['fixed_ip']['instance']['id']
ec2_id = ec2utils.id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
address_rv['instance_id'] = details
addresses.append(address_rv)
return {'addressesSet': addresses}
    def allocate_address(self, context, **kwargs):
        """Allocate a floating IP to the project (EC2 AllocateAddress)."""
        LOG.audit(_("Allocate address"), context=context)
        try:
            public_ip = self.network_api.allocate_floating_ip(context)
            return {'publicIp': public_ip}
        except rpc.RemoteError as ex:
            # NOTE(tr3buchet) - why does this block exist?
            # Translates the remote NoMoreFloatingIps error into the local
            # typed exception so callers don't have to unwrap RemoteError.
            if ex.exc_type == 'NoMoreFloatingIps':
                raise exception.NoMoreFloatingIps()
            else:
                raise
    def release_address(self, context, public_ip, **kwargs):
        """Release a floating IP back to the pool (EC2 ReleaseAddress)."""
        LOG.audit(_("Release address %s"), public_ip, context=context)
        self.network_api.release_floating_ip(context, address=public_ip)
        return {'releaseResponse': ["Address released."]}
    def associate_address(self, context, instance_id, public_ip, **kwargs):
        """Attach a floating IP to an instance (EC2 AssociateAddress).

        instance_id is an ec2-style id ('i-xxxxxxxx'); it is translated to
        the internal id before calling the compute API.
        """
        LOG.audit(_("Associate address %(public_ip)s to"
                " instance %(instance_id)s") % locals(), context=context)
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        self.compute_api.associate_floating_ip(context,
                                               instance_id=instance_id,
                                               address=public_ip)
        return {'associateResponse': ["Address associated."]}
    def disassociate_address(self, context, public_ip, **kwargs):
        """Detach a floating IP from its instance (EC2 DisassociateAddress)."""
        LOG.audit(_("Disassociate address %s"), public_ip, context=context)
        self.network_api.disassociate_floating_ip(context, address=public_ip)
        return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = kernel['id']
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
for bdm in kwargs.get('block_device_mapping', []):
_parse_block_device_mapping(bdm)
image = self._get_image(context, kwargs['image_id'])
if image:
image_state = self._get_image_state(image)
else:
raise exception.ImageNotFound(image_id=kwargs['image_id'])
if image_state != 'available':
raise exception.ApiError(_('Image must be available'))
instances = self.compute_api.create(context,
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
image_href=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
display_description=kwargs.get('display_description'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context,
reservation_id=instances[0]['reservation_id'])
    def _do_instance(self, action, context, ec2_id):
        """Translate an ec2 id to an internal id and apply `action` to it."""
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
        action(context, instance_id=instance_id)
    def _do_instances(self, action, context, instance_id):
        """Apply `action` to every ec2 id in the `instance_id` list."""
        for ec2_id in instance_id:
            self._do_instance(action, context, ec2_id)
    def terminate_instances(self, context, instance_id, **kwargs):
        """Terminate each instance in instance_id, which is a list of ec2 ids.

        instance_id is a kwarg so its name cannot be modified.
        """
        LOG.debug(_("Going to start terminating instances"))
        self._do_instances(self.compute_api.delete, context, instance_id)
        return True
    def reboot_instances(self, context, instance_id, **kwargs):
        """Reboot each instance; instance_id is a list of ec2 instance ids."""
        LOG.audit(_("Reboot instance %r"), instance_id, context=context)
        self._do_instances(self.compute_api.reboot, context, instance_id)
        return True
    def stop_instances(self, context, instance_id, **kwargs):
        """Stop each instances in instance_id.

        Here instance_id is a list of ec2 instance ids.
        """
        LOG.debug(_("Going to stop instances"))
        self._do_instances(self.compute_api.stop, context, instance_id)
        return True
    def start_instances(self, context, instance_id, **kwargs):
        """Start each instances in instance_id.

        Here instance_id is a list of ec2 instance ids.
        """
        LOG.debug(_("Going to start instances"))
        self._do_instances(self.compute_api.start, context, instance_id)
        return True
    def rescue_instance(self, context, instance_id, **kwargs):
        """Put a single instance into rescue mode.

        This is an extension to the normal ec2_api.
        """
        self._do_instance(self.compute_api.rescue, context, instance_id)
        return True
    def unrescue_instance(self, context, instance_id, **kwargs):
        """Take a single instance out of rescue mode.

        This is an extension to the normal ec2_api.
        """
        self._do_instance(self.compute_api.unrescue, context, instance_id)
        return True
def update_instance(self, context, instance_id, **kwargs):
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id,
**changes)
return True
@staticmethod
def _image_type(image_type):
"""Converts to a three letter image type.
aki, kernel => aki
ari, ramdisk => ari
anything else => ami
"""
if image_type == 'kernel':
return 'aki'
if image_type == 'ramdisk':
return 'ari'
if image_type not in ['aki', 'ari']:
return 'ami'
return image_type
    @staticmethod
    def image_ec2_id(image_id, image_type='ami'):
        """Returns image ec2_id using id and three letter type.

        Non-integer ids (which cannot be rendered with the %08x template)
        fall back to the placeholder 'ami-00000000'.
        """
        template = image_type + '-%08x'
        try:
            return ec2utils.id_to_ec2_id(int(image_id), template=template)
        except ValueError:
            #TODO(wwolf): once we have ec2_id -> glance_id mapping
            # in place, this wont be necessary
            return "ami-00000000"
    def _get_image(self, context, ec2_id):
        """Look up an image by ec2 id, falling back to lookup by name.

        Raises ImageNotFound if neither lookup succeeds, or if the image's
        container format does not match the three-letter prefix of the
        requested ec2 id (e.g. asking for 'aki-...' but finding an 'ami').
        """
        try:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            image = self.image_service.show(context, internal_id)
        except (exception.InvalidEc2Id, exception.ImageNotFound):
            try:
                return self.image_service.show_by_name(context, ec2_id)
            except exception.NotFound:
                raise exception.ImageNotFound(image_id=ec2_id)
        image_type = ec2_id.split('-')[0]
        if self._image_type(image.get('container_format')) != image_type:
            raise exception.ImageNotFound(image_id=ec2_id)
        return image
    def _format_image(self, image):
        """Convert from format defined by GlanceImageService to S3 format."""
        i = {}
        image_type = self._image_type(image.get('container_format'))
        ec2_id = self.image_ec2_id(image.get('id'), image_type)
        name = image.get('name')
        i['imageId'] = ec2_id
        kernel_id = image['properties'].get('kernel_id')
        if kernel_id:
            i['kernelId'] = self.image_ec2_id(kernel_id, 'aki')
        ramdisk_id = image['properties'].get('ramdisk_id')
        if ramdisk_id:
            i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari')
        i['imageOwnerId'] = image['properties'].get('owner_id')
        if name:
            i['imageLocation'] = "%s (%s)" % (image['properties'].
                                              get('image_location'), name)
        else:
            i['imageLocation'] = image['properties'].get('image_location')
        i['imageState'] = self._get_image_state(image)
        i['displayName'] = name
        i['description'] = image.get('description')
        # Map the three letter container type to the EC2 display string.
        display_mapping = {'aki': 'kernel',
                           'ari': 'ramdisk',
                           'ami': 'machine'}
        i['imageType'] = display_mapping.get(image_type)
        i['isPublic'] = image.get('is_public') == True
        i['architecture'] = image['properties'].get('architecture')
        properties = image['properties']
        root_device_name = block_device.properties_root_device_name(properties)
        root_device_type = 'instance-store'
        # A root device backed by a snapshot or a volume means an EBS-style
        # image; otherwise it is reported as instance-store.
        for bdm in properties.get('block_device_mapping', []):
            if (bdm.get('device_name') == root_device_name and
                ('snapshot_id' in bdm or 'volume_id' in bdm) and
                not bdm.get('no_device')):
                root_device_type = 'ebs'
        i['rootDeviceName'] = (root_device_name or _DEFAULT_ROOT_DEVICE_NAME)
        i['rootDeviceType'] = root_device_type
        _format_mappings(properties, i)
        return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
    def deregister_image(self, context, image_id, **kwargs):
        """Delete an image from the image service (EC2 DeregisterImage)."""
        LOG.audit(_("De-registering image %s"), image_id, context=context)
        image = self._get_image(context, image_id)
        internal_id = image['id']
        self.image_service.delete(context, internal_id)
        return {'imageId': image_id}
    def _register_image(self, context, metadata):
        """Create an image from `metadata` and return its ec2-style id."""
        image = self.image_service.create(context, metadata)
        image_type = self._image_type(image.get('container_format'))
        image_id = self.image_ec2_id(image['id'],
                                     image_type) if False else self.image_ec2_id(image['id'], image_type)
        return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
metadata = {'properties': {'image_location': image_location}}
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = \
kwargs.get('root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
return {'imageId': image_id}
    def describe_image_attribute(self, context, image_id, attribute, **kwargs):
        """Describe one image attribute (EC2 DescribeImageAttribute).

        Supported attributes: blockDeviceMapping, launchPermission,
        rootDeviceName.  Raises ApiError for anything else.
        """
        def _block_device_mapping_attribute(image, result):
            # Expand the image's block device mappings into EC2 form.
            _format_mappings(image['properties'], result)
        def _launch_permission_attribute(image, result):
            # A public image is modeled as launch permission for group 'all'.
            result['launchPermission'] = []
            if image['is_public']:
                result['launchPermission'].append({'group': 'all'})
        def _root_device_name_attribute(image, result):
            # Fall back to the module default when the image doesn't set one.
            result['rootDeviceName'] = \
                block_device.properties_root_device_name(image['properties'])
            if result['rootDeviceName'] is None:
                result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME
        supported_attributes = {
            'blockDeviceMapping': _block_device_mapping_attribute,
            'launchPermission': _launch_permission_attribute,
            'rootDeviceName': _root_device_name_attribute,
            }
        fn = supported_attributes.get(attribute)
        if fn is None:
            raise exception.ApiError(_('attribute not supported: %s')
                                     % attribute)
        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        result = {'imageId': image_id}
        fn(image, result)
        return result
def modify_image_attribute(self, context, image_id, attribute,
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
if not 'user_group' in kwargs:
raise exception.ApiError(_('user or group not specified'))
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
image['is_public'] = (operation_type == 'add')
return self.image_service.update(context, internal_id, image)
    def update_image(self, context, image_id, **kwargs):
        """Update arbitrary image metadata fields from kwargs."""
        internal_id = ec2utils.ec2_id_to_id(image_id)
        result = self.image_service.update(context, internal_id, dict(kwargs))
        return result
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
# care of it for now. Ostrich algorithm
def create_image(self, context, instance_id, **kwargs):
# NOTE(yamahata): name/description are ignored by register_image(),
# do so here
no_reboot = kwargs.get('no_reboot', False)
ec2_instance_id = instance_id
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
instance = self.compute_api.get(context, instance_id)
# stop the instance if necessary
restart_instance = False
if not no_reboot:
vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance_id=instance_id)
# wait instance for really stopped
start_time = time.time()
while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_id)
vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
timeout = 1 * 60 * 60 * 60
if time.time() > start_time + timeout:
raise exception.ApiError(
_('Couldn\'t stop instance with in %d sec') % timeout)
src_image = self._get_image(context, instance['image_ref'])
properties = src_image['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
mapping = []
bdms = db.block_device_mapping_get_all_by_instance(context,
instance_id)
for bdm in bdms:
if bdm.no_device:
continue
m = {}
for attr in ('device_name', 'snapshot_id', 'volume_id',
'volume_size', 'delete_on_termination', 'no_device',
'virtual_name'):
val = getattr(bdm, attr)
if val is not None:
m[attr] = val
volume_id = m.get('volume_id')
if m.get('snapshot_id') and volume_id:
# create snapshot based on volume_id
vol = self.volume_api.get(context, volume_id=volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
snapshot = self.volume_api.create_snapshot_force(
context, volume_id=volume_id, name=vol['display_name'],
description=vol['display_description'])
m['snapshot_id'] = snapshot['id']
del m['volume_id']
if m:
mapping.append(m)
for m in _properties_get_mappings(properties):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
src_image.pop(attr, None)
image_id = self._register_image(context, src_image)
if restart_instance:
self.compute_api.start(context, instance_id=instance_id)
return {'imageId': image_id}
|
30loops/nova
|
nova/api/ec2/cloud.py
|
Python
|
apache-2.0
| 75,405
|
from django.contrib.admin.views.decorators import staff_member_required
from task_queue import tasks
from api.v3.resources.activity_view_resources import HttpResponse
# PARSE TASKS
@staff_member_required
def add_task(request):
    """Enqueue a named task from task_queue.tasks onto the requested RQ queue."""
    import django_rq
    task_name = request.GET.get('task')
    task_parameters = request.GET.get('parameters')
    target_queue = django_rq.get_queue(request.GET.get('queue'))
    task_func = getattr(tasks, task_name)
    if task_parameters:
        target_queue.enqueue(task_func, args=(task_parameters,), timeout=7200)
    else:
        target_queue.enqueue(task_func, timeout=7200)
    return HttpResponse('Success')
# TASK QUEUE MANAGEMENT
@staff_member_required
def start_worker_with_supervisor(request):
    """Run the 'supervisor' management command with an action and program.

    e.g. ?action=start&worker_program=worker starts that supervised program.
    """
    from django.core.management import call_command
    action = request.GET.get('action')
    worker_program = request.GET.get('worker_program')
    # Fix: the argument list was previously bound to the name `list`,
    # shadowing the builtin.
    command_args = [action, worker_program]
    call_command('supervisor', *command_args)
    return HttpResponse('Success')
@staff_member_required
def get_workers(request):
    """Return a JSON summary of all RQ workers and their current jobs."""
    from rq import Worker, push_connection
    import redis
    import json
    conn = redis.Redis()
    push_connection(conn)
    summaries = []
    for worker in Worker.all(connection=conn):
        job = worker.get_current_job()
        if job:
            job_info = {'id': job.id,
                        'args': job.args,
                        'enqueued_at': job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
                        'description': job.description}
        else:
            job_info = None
        summaries.append({'pid': worker.pid,
                          'name': worker.name,
                          'state': worker.get_state(),
                          'current_job': job_info})
    return HttpResponse(json.dumps(summaries), content_type='application/json')
@staff_member_required
def delete_task_from_queue(request):
    """Remove a single queued job, identified by ?job_id=..."""
    job_id = request.GET.get('job_id')
    tasks.delete_task_from_queue(job_id)
    return HttpResponse('Success')
@staff_member_required
def delete_all_tasks_from_queue(request):
    """Empty the queue named by ?queue_name=..."""
    queue_name = request.GET.get('queue_name')
    tasks.delete_all_tasks_from_queue(queue_name)
    return HttpResponse('Success')
@staff_member_required
def get_current_job(request):
    """Return the 'current job' serialized as JSON.

    NOTE(review): rq.get_current_job reports the job the *calling process*
    is executing; inside a web process this is presumably always None (so
    the response body is 'null') -- confirm the intended behaviour.
    """
    from rq import get_current_job
    from rq import use_connection
    from redis import Redis
    from rq import Queue
    use_connection()
    redis_conn = Redis()
    q = Queue(connection=redis_conn)
    job = get_current_job(q)
    import json
    # NOTE(review): if a real Job object were ever returned it is not JSON
    # serializable and json.dumps would raise TypeError -- verify.
    data = json.dumps(job)
    return HttpResponse(data, content_type='application/json')
@staff_member_required
def test(request):
    """Debug endpoint: return the worker's current job serialized as JSON.

    Outside a worker process rq.get_current_job() returns None, so the
    response body is normally 'null'.
    """
    from rq import get_current_job
    job = get_current_job()
    import json
    # Bug fix: a Django view must return an HttpResponse; this previously
    # returned the bare json string, which Django rejects.
    return HttpResponse(json.dumps(job), content_type='application/json')
# Schedule management
@staff_member_required
def start_scheduler(request):
    """Start the rq_scheduler main loop in this process.

    NOTE(review): rqscheduler.main() blocks; this request presumably never
    returns the 'Success' response -- confirm this endpoint is intended to
    be fired into a dedicated process.
    """
    from rq_scheduler.scripts import rqscheduler
    rqscheduler.main()
    return HttpResponse('Success')
@staff_member_required
def add_scheduled_task(request):
    """Register a repeating task with rq_scheduler.

    GET params: task (name in task_queue.tasks), period (repeat interval in
    seconds), queue (queue name), parameters (optional single int argument).
    """
    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')
    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime
    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler(queue)  # Get a scheduler for the requested queue
    # Bug fix: GET parameters arrive as strings, but rq_scheduler expects
    # the repeat interval as a number of seconds; also fold the two nearly
    # identical schedule() branches into one call.
    schedule_kwargs = {
        'scheduled_time': datetime.now(),  # Time for first execution
        'func': getattr(tasks, task),      # Function to be queued
        'interval': int(period),           # Seconds between executions
        'repeat': None,                    # None means repeat forever
    }
    if parameters:
        schedule_kwargs['args'] = [int(parameters)]
    scheduler.schedule(**schedule_kwargs)
    return HttpResponse('Success')
@staff_member_required
def get_queue(request):
    """Return (a capped number of) the jobs waiting on ?queue=... as JSON."""
    import django_rq
    import json
    current_queue = request.GET.get('queue')
    queue = django_rq.get_queue(current_queue)
    jobdata = list()
    count_jobs = 0
    for job in queue.jobs:
        count_jobs += 1
        # NOTE(review): breaking before appending caps the payload at 19
        # jobs, not 20 -- confirm whether the off-by-one is intended.
        if count_jobs == 20:
            break
        # NOTE(review): rq Job objects expose the callable path as
        # `func_name`, not `__name__` -- verify against the rq version in use.
        job_dict = { 'job_id': job._id, 'created_at':job.created_at.strftime("%a, %d %b %Y %H:%M:%S +0000"), 'enqueued_at':job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"), 'status': job.get_status(), 'function': job.__name__, 'args': job.args}
        jobdata.append(job_dict)
    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
@staff_member_required
def get_scheduled_tasks(request):
    """Return a JSON list of jobs registered with rq_scheduler
    (default queue only)."""
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json
    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler()  # Scheduler for the "default" queue
    payload = []
    for job in scheduler.get_jobs():
        payload.append({'job_id': job._id,
                        'task': job.description,
                        # Jobs without an interval are reported as period 0.
                        'period': job.meta.get("interval", 0),
                        'args': job.args,
                        'queue': "default"})
    return HttpResponse(json.dumps(payload), content_type='application/json')
@staff_member_required
def cancel_scheduled_task(request):
    """Cancel a scheduled job identified by ?job_id=...

    NOTE(review): the Scheduler is constructed on the 'parser' queue while
    jobs are added on caller-chosen queues elsewhere in this module --
    cancellation by job id appears queue-independent in rq_scheduler, but
    confirm.
    """
    job_id = request.GET.get('job_id')
    from rq_scheduler import Scheduler
    scheduler = Scheduler('parser')
    scheduler.cancel(job_id)
    return HttpResponse('Success')
# Failed tasks
@staff_member_required
def get_failed_tasks(request):
    """Return the jobs on the RQ failed queue as JSON.

    Fixes: adds the @staff_member_required guard that every other
    management view in this module has; drops the unused `strftime` import;
    and reads rq's `func_name` attribute (Job objects expose the callable's
    dotted path as `func_name`, not `__name__`).
    """
    import django_rq
    import json
    queue = django_rq.get_failed_queue()
    jobdata = list()
    for job in queue.jobs:
        job_dict = {'job_id': job.id,
                    'func_name': job.func_name,
                    'error_message': job.exc_info,
                    'ended_at': job.ended_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
                    'enqueued_at': job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000")}
        jobdata.append(job_dict)
    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
@staff_member_required
def reschedule_all_failed(request):
    """Re-enqueue every job currently sitting on the failed queue."""
    from rq import requeue_job, get_failed_queue
    from django_rq import get_connection
    failed_queue = get_failed_queue(get_connection())
    for failed_job in failed_queue.jobs:
        requeue_job(failed_job.id, connection=failed_queue.connection)
    return HttpResponse('Success')
|
catalpainternational/OIPA
|
OIPA/task_queue/views.py
|
Python
|
agpl-3.0
| 7,273
|
import os
root_folder = "/mnt/Data/Music/"
class Main:
    """Walks a music library and sets custom folder icons via gvfs.

    Artist folders use a file named "artist.*" as their icon; album folders
    use "cover.*".
    """

    @staticmethod
    def get_all_artists():
        """Set icons for every artist folder under root_folder and recurse
        into each artist's albums."""
        for artist in os.listdir(root_folder):
            artist_path = os.path.join(root_folder, artist)
            if os.path.isdir(artist_path):
                icon_name = Main.get_icon_name(artist_path, 0)
                if icon_name is not None:
                    Main.set_icon(artist_path, icon_name)
                Main.get_all_albums_by_artist(artist_path)

    @staticmethod
    def get_all_albums_by_artist(artist_folder):
        """Set the cover icon for each album folder inside artist_folder."""
        for album in os.listdir(artist_folder):
            album_path = os.path.join(artist_folder, album)
            if os.path.isdir(album_path):
                icon_name = Main.get_icon_name(album_path, 1)
                if icon_name is not None:
                    Main.set_icon(album_path, icon_name)

    @staticmethod
    def get_icon_name(folder, is_album):
        """Return the icon file's name in `folder`, or None if absent.

        Album folders (is_album truthy) look for "cover.*" files; artist
        folders look for "artist.*" files.
        """
        prefix = "cover." if is_album else "artist."
        for entry in os.listdir(folder):
            if (os.path.isfile(os.path.join(folder, entry))
                    and entry.startswith(prefix)):
                return entry
        return None

    @staticmethod
    def set_icon(folder, icon):
        """Attach `icon` as the custom folder icon using gvfs-set-attribute."""
        import subprocess
        # Fix: pass the command as an argument list (no shell) so folder
        # names containing quotes or spaces can't break or inject into the
        # command line (the original interpolated repr() into a shell
        # string); also report success only after checking the exit status
        # (the original printed "Cover set" before checking, and was
        # missing a space after "for").
        response = subprocess.call([
            'gvfs-set-attribute', '-t', 'string', folder,
            'metadata::custom-icon',
            'file://%s' % os.path.join(folder, icon)])
        if response != 0:
            print("Error while setting cover for: " + folder)
        else:
            print("Cover set for " + folder)
Main.get_all_artists()
|
beschoenen/BulkIconChanger
|
main.py
|
Python
|
mit
| 1,668
|
# Roundware Server is released under the GNU Affero General Public License v3.
# See COPYRIGHT.txt, AUTHORS.txt, and LICENSE.txt in the project root directory.
# TODO: Figure out how to get the main pipeline to send EOS
# when all audiotracks are finished (only happens
# when repeat is off)
# TODO: Reimplement panning using a gst.Controller
# TODO: Remove stereo_pan from public interface
from __future__ import unicode_literals
import gobject
gobject.threads_init()
import pygst
pygst.require("0.10")
import gst
import random
import logging
import os
import time
from roundwared import src_wav_file
from roundwared import db
from django.conf import settings
from roundware.rw.models import Asset
STATE_PLAYING = 0
STATE_DEAD_AIR = 1
STATE_WAITING = 2
logger = logging.getLogger(__name__)
class AudioTrack:
    """Plays a sequence of audio assets, with random dead air between them,
    into a shared GStreamer pipeline, panning the stereo position over time.

    NOTE(review): `settings` here is the per-audiotrack settings object
    (minduration/maxvolume/... fields), not django.conf.settings, which is
    referenced separately as the module-level `settings`.
    """
    ######################################################################
    # PUBLIC
    ######################################################################
    def __init__(self, stream, pipeline, adder, settings, recording_collection):
        """Store collaborators; no GStreamer elements are created yet."""
        self.stream = stream
        self.pipeline = pipeline
        self.adder = adder
        self.settings = settings
        self.rc = recording_collection
        self.current_pan_pos = 0
        self.target_pan_pos = 0
        self.state = STATE_DEAD_AIR
        self.src_wav_file = None
        self.current_recording = None
        # Incremented only after start_audio() is called.
        self.track_timer = 0

    def start_audio(self):
        """
        Called once to start the audio manager timer
        """
        def asset_start_timer():
            """
            The asset timer runs once to start new assets after a certain
            amount of dead air.
            gobject timeout callbacks are repeated until they return False.
            """
            self.add_file()
            return False

        def track_timer():
            """
            The audio manager.
            Timeout called every second to maintain the audio asset stream.
            """
            logger.debug("AT State: %d, Stream state: %d" % (self.state, self.stream.get_state()))
            # logger.debug("TickTock: %s" % self.track_timer)
            self.track_timer += 1
            # Do nothing if audio is playing already.
            if self.state == STATE_PLAYING or self.stream.is_paused():
                return True
            # No audio playing and asset_timer_callback is not scheduled, this
            # is set by self.clean_up() when an asset ends.
            elif self.state == STATE_DEAD_AIR:
                self.state = STATE_WAITING
                # Generate a random amount of dead air.
                deadair = random.randint(
                    self.settings.mindeadair,
                    self.settings.maxdeadair) / gst.MSECOND
                # Attempt to start an asset in the future.
                gobject.timeout_add(deadair, asset_start_timer)
            return True
        # http://www.pygtk.org/pygtk2reference/gobject-functions.html#function-gobject--timeout-add
        # Call audio_timer_callback() every second.
        gobject.timeout_add(1000, track_timer)

    def stereo_pan(self):
        """Move one step toward the target pan position, picking a new
        target (and step budget) whenever the current one is reached."""
        if self.current_pan_pos == self.target_pan_pos \
                or self.pan_steps_left == 0:
            self.set_new_pan_target()
            self.set_new_pan_duration()
        else:
            pan_distance = \
                self.target_pan_pos - self.current_pan_pos
            amount_to_pan_now = pan_distance / self.pan_steps_left
            self.current_pan_pos += amount_to_pan_now
            self.pan_steps_left -= 1
            if self.src_wav_file:
                self.src_wav_file.pan_to(self.current_pan_pos)

    ######################################################################
    # PRIVATE
    ######################################################################
    def add_file(self):
        """Pick the next recording, build a SrcWavFile for a random slice of
        its active region (with random fades/volume), link it into the
        pipeline, and record it in the session history."""
        self.current_recording = self.rc.get_recording()
        if not self.current_recording:
            self.state = STATE_DEAD_AIR
            self.set_track_metadata()
            return
        # use active region rather than entire audio file
        self.current_active_region_length = int((self.current_recording.end_time - self.current_recording.start_time) * 1000000000)
        duration = min(
            self.current_active_region_length,
            random.randint(
                # FIXME: I don't allow less than a second to
                # play currently. Mostly because playing zero
                # is an error. Revisit this.
                max(self.settings.minduration,
                    gst.SECOND),
                max(self.settings.maxduration,
                    gst.SECOND)))
        start = random.randint(
            int((self.current_recording.start_time * 1000000000)),
            int((self.current_recording.end_time * 1000000000)) - duration)
        fadein = random.randint(
            self.settings.minfadeintime,
            self.settings.maxfadeintime)
        fadeout = random.randint(
            self.settings.minfadeouttime,
            self.settings.maxfadeouttime)
        # FIXME: Instead of doing this divide by two, instead,
        # decrease them by the same percentage. Remember it's
        # possible that fade_in != fade_out.
        if fadein + fadeout > duration:
            fadein = duration / 2
            fadeout = duration / 2
        volume = self.current_recording.volume * (
            self.settings.minvolume +
            random.random() *
            (self.settings.maxvolume -
             self.settings.minvolume))
        # logger.debug("current_recording.filename: %s, start: %s, duration: %s, fadein: %s, fadeout: %s, volume: %s",
        #              self.current_recording.filename, start, duration, fadein, fadeout, volume)
        logger.info("Session %s - Playing asset %s filename: %s, duration: %.2f secs" %
                    (self.stream.sessionid, self.current_recording.id,
                     self.current_recording.filename, duration / 1000000000.0))
        self.src_wav_file = src_wav_file.SrcWavFile(
            os.path.join(settings.MEDIA_ROOT,
                         self.current_recording.filename),
            start, duration, fadein, fadeout, volume)
        self.pipeline.add(self.src_wav_file)
        self.srcpad = self.src_wav_file.get_pad('src')
        self.addersinkpad = self.adder.get_request_pad('sink%d')
        self.srcpad.link(self.addersinkpad)
        # Add event watcher/callback
        self.addersinkpad.add_event_probe(self.event_probe)
        (ret, cur, pen) = self.pipeline.get_state()
        self.src_wav_file.set_state(cur)
        self.state = STATE_PLAYING
        # Generate metadata for the current asset.
        tags = [str(tag.id) for tag in self.current_recording.tags.all()]
        self.set_track_metadata({'asset': self.current_recording.id,
                                 'tags': ','.join(tags)})
        db.add_asset_to_session_history(
            self.current_recording.id, self.stream.sessionid, duration)

    def event_probe(self, pad, event):
        """GStreamer pad event callback: schedule cleanup at end-of-stream
        and a seek when the new segment is announced."""
        # End of current audio asset, start a new asset.
        if event.type == gst.EVENT_EOS:
            self.set_track_metadata({'asset': self.current_recording.id,
                                     'complete': True, })
            gobject.idle_add(self.clean_up)
        # New asset added, seek to it's starting timestamp.
        elif event.type == gst.EVENT_NEWSEGMENT:
            gobject.idle_add(self.src_wav_file.seek_to_start)
        return True

    def clean_up(self):
        """Unlink and drop the current source element, returning the track
        to the dead-air state. Returns False so idle_add fires once."""
        if self.src_wav_file:
            self.src_wav_file.set_state(gst.STATE_NULL)
            self.pipeline.remove(self.src_wav_file)
            self.adder.release_request_pad(self.addersinkpad)
            self.state = STATE_DEAD_AIR
            self.current_recording = None
            self.src_wav_file = None
        return False

    def set_new_pan_target(self):
        """Choose a new random pan target on a quantized grid in [-1, 1]."""
        pan_step_size = (self.settings.maxpanpos -
                         self.settings.minpanpos) / \
            settings.NUM_PAN_STEPS
        target_pan_step = random.randint(0, settings.NUM_PAN_STEPS)
        self.target_pan_pos = -1 + target_pan_step * pan_step_size

    def set_new_pan_duration(self):
        """Choose how many stereo_pan() steps the next sweep should take."""
        duration_in_gst_units = \
            random.randint(
                self.settings.minpanduration,
                self.settings.maxpanduration)
        duration_in_miliseconds = duration_in_gst_units / gst.MSECOND
        self.pan_steps_left = duration_in_miliseconds / \
            settings.STEREO_PAN_INTERVAL

    def skip_ahead(self):
        """Fade out the current asset (blocking for the fade duration) and
        clean it up so the next asset can start."""
        fadeoutnsecs = random.randint(
            self.settings.minfadeouttime,
            self.settings.maxfadeouttime)
        # Fix: compare against None with `is not None` rather than `!=`.
        if self.src_wav_file is not None and not self.src_wav_file.fading:
            logger.info("fading out for: " + str(round((fadeoutnsecs/1000000000),2)) + " sec")
            self.src_wav_file.fade_out(fadeoutnsecs)
            # 1st arg is in milliseconds
            # 1000000000
            #gobject.timeout_add(fadeoutnsecs/gst.MSECOND, self.clean_up)
            # wait until fade is complete and then clean-up
            time.sleep(fadeoutnsecs / 1000000000)
            self.clean_up()
        else:
            logger.debug("skip_ahead: no src_wav_file")

    def play_asset(self, asset_id):
        """Force a specific asset to play next: move it to the front of the
        recording collection and skip the currently playing asset."""
        logger.info("AudioTrack play asset: " + str(asset_id))
        try:
            asset = Asset.objects.get(id=str(asset_id))
            self.rc.remove_asset_from_rc(asset)
            self.rc.add_asset_to_rc(asset)
            self.skip_ahead()
        except Asset.DoesNotExist:
            logger.error("Asset with ID %d does not exist." % asset_id)

    def set_track_metadata(self, metadata=None):
        """
        Sets Audiotrack specific metadata.

        Fix: the default was previously a mutable `{}` shared across calls;
        use None as the sentinel instead.
        """
        data = {'audiotrack': self.settings.id,
                'remaining': self.rc.count(),
                }
        if metadata:
            data.update(metadata)
        self.stream.set_metadata(data)
|
probabble/roundware-server
|
roundwared/audiotrack.py
|
Python
|
agpl-3.0
| 10,047
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jax recurrent layer implementation.
The main interface of this module is recurrent_func().
This expects the caller to describe the recurrent neural net by specifying:
- theta: the "weights" each RNN uses.
- states_0: the initial state of each RNN.
- cell_fn: A python function describing RNN cell. It must have the following
signature::
cell_fn: (theta, states_0, inputs) -> states_1
states_1 is the next RNN state.
recurrent_func computes, roughly::
state = states_0
t = 0
while t < seq_length:
state = cell_fn(theta, state, inputs[t, :])
accumulate_state[t, :] = state
t += 1
return state, accumulate_state
"""
import enum
import functools
from typing import Callable, Optional, Tuple
import jax
from jax import ad_checkpoint
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
import tensorflow.compat.v2 as tf
# Short aliases for frequently-used lingvo/jax types in this module.
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
ParamsT = pytypes.ParamsT
JTensor = pytypes.JTensor
# Optional variants used in function signatures below.
CallableOrNone = Optional[Callable]
NestedMapOrNone = Optional[NestedMap]
@enum.unique
class AutodiffCheckpointType(str, enum.Enum):
  """jax.checkpoint policy types.

  Each member selects which intermediate activations are saved for the
  backward pass (see custom_policy() in scan() below); everything not saved
  is rematerialized.
  """
  SAVE_NOTHING = 'save_nothing'
  SAVE_EVERYTHING = 'save_everything'
  SAVE_QKV_OUT_PROJ = 'save_qkv_out_proj'
  SAVE_OUT_PROJ = 'save_out_proj'
  SAVE_CONTEXT = 'save_context'
  # NOTE(review): the value string ('save_encoded_and_out_proj') does not
  # match the member name; kept as-is since the string value may be
  # referenced/serialized elsewhere — confirm before renaming.
  SAVE_CONTEXT_AND_OUT_PROJ = 'save_encoded_and_out_proj'
  SAVE_DOT_ONLY = 'save_dot_only'
  SAVE_DOT_WITH_NO_BATCH_DIM = 'save_dot_with_no_batch_dims'
  SAVE_DOT_FOR_MLPERF_200B = 'save_dot_for_mlperf_200b'
def recurrent_func(theta: NestedMap, states_0: NestedMap, inputs: NestedMap,
                   cell_fn: Callable[[NestedMap, NestedMap, NestedMap],
                                     NestedMap]):
  """Computes a recurrent neural net.

  Runs `cell_fn` over the leading (time) dimension of `inputs` using
  `jax.lax.while_loop`, with a hand-written custom VJP so that the backward
  pass is also a while_loop (running time-reversed) instead of an unrolled
  graph.

  Args:
    theta: weights. A `.NestedMap`.
    states_0: initial state. A `.NestedMap`.
    inputs: inputs. A `.NestedMap`.
  cell_fn: A python function which computes::
      states_1 = cell_fn(theta, states_0, inputs[t, :])

  Returns:
    `accumulate_state` and the final state.
  """
  # Sequence length is taken from the leading axis of any input tensor.
  input_seq_len = inputs.Flatten()[0].shape[0]
  def assert_not_none(x):
    assert x is not None
  tf.nest.map_structure(assert_not_none, states_0)
  tf.nest.map_structure(assert_not_none, inputs)
  tf.nest.map_structure(assert_not_none, theta)
  def new_cum_state(x):
    x1 = jnp.expand_dims(x, 0)
    # +1 so that we can store initial_states at position 0.
    return jnp.tile(x1, [input_seq_len + 1] + [1] * x.ndim)
  cumulative_states = states_0.Transform(new_cum_state)
  prng_key = base_layer.next_prng_key()
  global_step = base_layer.cur_global_step()
  start_time = jnp.array(0, dtype=jnp.uint32)
  fwd_initial_loop_vars = NestedMap(
      cur_time=start_time,
      theta=theta,
      states_0=states_0,
      cumulative_states=cumulative_states,
      inputs=inputs)
  def same_type_shape(x, y):
    # Loop-carried values must keep dtype and shape fixed across iterations.
    assert x.dtype == y.dtype, (x.dtype, y.dtype)
    assert x.shape == y.shape, (x.shape, y.shape)
  def wrapped_cell_fn(fn_in):
    # fn_in is NestedMap containing the following elements:
    # - t
    # - theta
    # - states_0
    # - inputs_t
    # Start a chain of prng key that also takes into account of time steps.
    t = fn_in.t
    theta = fn_in.theta
    states_0 = fn_in.states_0
    inputs_t = fn_in.inputs_t
    with base_layer.JaxContext.new_context(
        prng_key=jax.random.fold_in(prng_key, t), global_step=global_step):
      # NO side-effect ops are allowed as the enclosing JaxContext is not bound
      # to any layer.
      states_1 = cell_fn(theta, states_0, inputs_t)
      tf.nest.assert_same_structure(states_0, states_1)
      tf.nest.map_structure(same_type_shape, states_0, states_1)
    return states_1
  def wrapped_cell_fn_grad(fn_in, d_fn_out):
    # This is roughly the following:
    #
    # fn_out = wrapped_cell_fn(fn_in)
    # d_fn_in = tf.gradient(fn_out, fn_in, d_fn_out)
    # return d_fn_in
    #
    assert isinstance(fn_in, NestedMap)
    fn_out, vjp_fn = jax.vjp(wrapped_cell_fn, fn_in)
    del fn_out
    d_fn_in = vjp_fn(d_fn_out)
    assert isinstance(d_fn_in, tuple)
    assert len(d_fn_in) == 1
    d_fn_in_0 = d_fn_in[0]
    # Over-write gradient for t, the time step.
    d_fn_in_0.t = jnp.zeros_like(fn_in.t)
    tf.nest.assert_same_structure(fn_in, d_fn_in_0)
    tf.nest.map_structure(same_type_shape, fn_in, d_fn_in_0)
    return d_fn_in_0
  def fwd_comp_fn(loop_vars):
    # One forward time step: computes states_1 and records it at t + 1.
    # loop_vars is a NestedMap containing the following elements:
    # - cur_time
    # - theta
    # - inputs
    # - cumulative_states
    # - states_0
    t = loop_vars.cur_time
    theta = loop_vars.theta
    inputs = loop_vars.inputs
    cumulative_states = loop_vars.cumulative_states
    states_0 = loop_vars.states_0
    inputs_t = inputs.Transform(lambda x: x[t])
    states_1 = wrapped_cell_fn(
        NestedMap(t=t, theta=theta, states_0=states_0, inputs_t=inputs_t))
    def set_t(x, x_t):
      return x.at[t + 1].set(x_t)
    cumulative_states = tf.nest.map_structure(set_t, cumulative_states,
                                              states_1)
    loop_out = NestedMap(
        cur_time=t + 1,
        theta=theta,
        inputs=inputs,
        states_0=states_1,
        cumulative_states=cumulative_states)
    return loop_out
  def fwd_continue_fn(loop_vars):
    return loop_vars.cur_time < input_seq_len
  # This custom_vjp implementation follows examples here:
  # https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html
  @jax.custom_vjp
  def fwd_loop(loop_vars):
    final_loop_vars = jax.lax.while_loop(fwd_continue_fn, fwd_comp_fn,
                                         loop_vars)
    return NestedMap(
        final_states=final_loop_vars.states_0,
        cumulative_states=final_loop_vars.cumulative_states)
  def loop_fn_vjp_fwd(loop_vars):
    # Residuals: original loop inputs plus all per-step states, replayed in
    # the backward pass instead of being re-materialized by autodiff.
    loop_fn_out = fwd_loop(loop_vars)
    return loop_fn_out, (loop_vars, loop_fn_out.cumulative_states)
  def loop_fn_vjp_bwd(res, d_out):
    # Backward pass: a while_loop running from t = T - 1 down to 0,
    # accumulating gradients for theta and inputs along the way.
    fwd_loop_vars, cumulative_states = res
    d_final_states = d_out.final_states
    d_cumulative_states = d_out.cumulative_states
    start_time = input_seq_len - 1
    d_states_1 = tf.nest.map_structure(lambda x, y: x[start_time + 1] + y,
                                       d_cumulative_states, d_final_states)
    bwd_loop_vars = NestedMap(
        cur_time=start_time,
        theta=fwd_loop_vars.theta,
        inputs=fwd_loop_vars.inputs,
        cumulative_states=cumulative_states,
        d_cumulative_states=d_cumulative_states,
        d_theta=fwd_loop_vars.theta.Transform(jnp.zeros_like),
        d_inputs=fwd_loop_vars.inputs.Transform(jnp.zeros_like),
        d_states_1=d_states_1)
    def bwd_comp_fn(loop_vars):
      t = loop_vars.cur_time
      inputs = loop_vars.inputs
      inputs_t = inputs.Transform(lambda x: x[t])
      states_0 = loop_vars.cumulative_states.Transform(lambda x: x[t])
      d_cell_in = wrapped_cell_fn_grad(
          NestedMap(
              t=t, theta=loop_vars.theta, states_0=states_0, inputs_t=inputs_t),
          loop_vars.d_states_1)
      d_theta = tf.nest.map_structure(lambda x, y: x + y, loop_vars.d_theta,
                                      d_cell_in.theta)
      d_states_0 = tf.nest.map_structure(lambda x, y: x + y[t],
                                         d_cell_in.states_0,
                                         loop_vars.d_cumulative_states)
      def set_t(x, x_t):
        return x.at[t].set(x_t)
      d_inputs = tf.nest.map_structure(set_t, loop_vars.d_inputs,
                                       d_cell_in.inputs_t)
      loop_vars_out = loop_vars.Transform(lambda x: x)
      loop_vars_out.d_inputs = d_inputs
      loop_vars_out.d_states_1 = d_states_0
      loop_vars_out.d_theta = d_theta
      loop_vars_out.cur_time = t - 1
      return loop_vars_out
    def bwd_continue_fn(loop_vars):
      return loop_vars.cur_time >= 0
    bwd_final_loop_vars = jax.lax.while_loop(bwd_continue_fn, bwd_comp_fn,
                                             bwd_loop_vars)
    d_out = fwd_loop_vars.Transform(jnp.zeros_like)
    tf.nest.map_structure(same_type_shape, d_out.states_0,
                          bwd_final_loop_vars.d_states_1)
    tf.nest.map_structure(same_type_shape, d_out.theta,
                          bwd_final_loop_vars.d_theta)
    tf.nest.map_structure(same_type_shape, d_out.inputs,
                          bwd_final_loop_vars.d_inputs)
    d_out.states_0 = bwd_final_loop_vars.d_states_1
    d_out.theta = bwd_final_loop_vars.d_theta
    d_out.inputs = bwd_final_loop_vars.d_inputs
    return (d_out,)
  fwd_loop.defvjp(loop_fn_vjp_fwd, loop_fn_vjp_bwd)
  # Finally, let's simply run the forward loop fn.
  fwd_final_loop_vars = fwd_loop(fwd_initial_loop_vars)
  # Drop slot 0 (the initial state) so the accumulated states align with t.
  fwd_cumulative_states = fwd_final_loop_vars.cumulative_states.Transform(
      lambda x: x[1:])
  return fwd_final_loop_vars.final_states, fwd_cumulative_states
def recurrent_static(theta: NestedMap,
                     states_0: NestedMap,
                     inputs: NestedMap,
                     cell_fn: Callable[[NestedMap, NestedMap, NestedMap],
                                       NestedMap],
                     root_layer: Optional[base_layer.BaseLayer] = None):
  """A simpler form of Recurrent where num of steps is known statically.

  Back-prop is available through auto-diff (the loop is a jax.lax.scan, not a
  hand-written VJP as in recurrent_func above).

  'padding' in inputs is used to skip certain steps dynamically. If the
  'padding' tensor exists, it is expected of a binary 0/1 tensor.

  Args:
    theta: weights. A `.NestedMap`.
    states_0: initial state. A `.NestedMap`.
    inputs: inputs. A `.NestedMap`. All inputs in time-major.
    cell_fn: A python function which computes::
        states_1 = cell_fn(theta, states_0, inputs[t, :])
    root_layer: The root layer within which this recurrent_static recurrent
      loop is carried out. If provided, a basic-effort check asserts that
      cell_fn did not create/destroy any forward-updated vars.

  Returns:
    `accumulate_state` and the final state.
  """
  assert 'time_step' not in states_0
  # The initial time step.
  time_step = jnp.array(0, dtype=jnp.uint32)
  # Make a copy of states_0 structure.
  states_0 = tf.nest.map_structure(lambda x: x, states_0)
  # The step counter is threaded through the carried state so each step can
  # fold it into the PRNG key.
  states_0.time_step = time_step
  prng_key = base_layer.next_prng_key()
  global_step = base_layer.cur_global_step()
  # TODO(zhangqiaorjc): Switch to ad_checkpoint.checkpoint after mattjj bug fix.
  @jax.checkpoint
  def comp_fn(states_0, inputs_t):
    # Start a new prng_key branch that also depends on the time step.
    if root_layer is not None:
      forward_updated_vars_before = tf.nest.map_structure(
          lambda x: x, root_layer.forward_updated_vars)
    prng_key_t = jax.random.fold_in(prng_key, states_0.time_step)
    with base_layer.JaxContext.new_context(
        prng_key=prng_key_t, global_step=global_step):
      # NO side-effect ops are allowed as the enclosing JaxContext is not bound
      # to any layer.
      #
      # Whether or not we should skip this time step.
      if 'padding' in inputs_t:
        # We skip if all are padded steps.
        skip = jnp.all(inputs_t.padding > 0.5)
      else:
        skip = jnp.array(False)
      def carry_over(args):
        states_0, inputs_t = args
        del inputs_t
        # We simply carry over the states for this time step.
        states_1 = tf.nest.map_structure(lambda x: x, states_0)
        states_1.time_step = states_0.time_step + 1
        return states_1
      def do_compute(args):
        states_0, inputs_t = args
        # Actually carry out the computation.
        states_1 = cell_fn(theta, states_0, inputs_t)
        states_1.time_step = states_0.time_step + 1
        return states_1
      # lax.cond: only one branch is executed per step at runtime.
      if 'padding' in inputs_t:
        states_1 = jax.lax.cond(skip, carry_over, do_compute,
                                (states_0, inputs_t))
      else:
        states_1 = do_compute((states_0, inputs_t))
      tf.nest.assert_same_structure(states_0, states_1)
      if root_layer is not None:
        forward_updated_vars_after = tf.nest.map_structure(
            lambda x: x, root_layer.forward_updated_vars)
        def assert_no_change(x, y):
          # Only checks presence/absence, not values: cell_fn must not start
          # or stop updating any var mid-scan.
          assert (x is None and y is None) or (x is not None and y is not None)
        tf.nest.map_structure(assert_no_change, forward_updated_vars_before,
                              forward_updated_vars_after)
      # scan carry and per-step output are the same structure here.
      return states_1, states_1
  final_states, cumulative_states = jax.lax.scan(comp_fn, states_0, inputs)
  # Strip the internal step counter before returning to the caller.
  del final_states.time_step
  del cumulative_states.time_step
  return final_states, cumulative_states
def scan(carry_init: NestedMap,
         xs: NestedMap,
         fn: Callable[[NestedMap, NestedMap], Tuple[NestedMap, NestedMap]],
         root_layer: Optional[base_layer.BaseLayer] = None,
         checkpoint_policy: AutodiffCheckpointType = AutodiffCheckpointType
         .SAVE_NOTHING):
  """A simple wrap around jax.lax.scan.

  Back-prop is available through auto-diff; the rematerialization behavior of
  the backward pass is controlled by `checkpoint_policy`.

  Args:
    carry_init: initial state. A `.NestedMap`.
    xs: inputs. A `.NestedMap`. All inputs in time-major.
    fn: A python function which computes:
        carry, ys[t] = fn(carry, xs[t, :])
    root_layer: The root layer within which this jax.lax.scan based while_loop
      is carried out. If root_layer is provided, some basic-effort check is
      performed to make sure fn is side-effect free. Otherwise, no such checks
      are performed. (NOTE(review): currently unused — deleted below.)
    checkpoint_policy: A AutodiffCheckpointType. How to checkpoint for BProp:
      SAVE_NOTHING, SAVE_DOT_ONLY, SAVE_DOT_WITH_NO_BATCH_DIM.

  Returns:
    (final 'carry', 'ys', stacked summaries).
  """
  del root_layer
  assert isinstance(carry_init, py_utils.NestedMap)
  assert isinstance(xs, py_utils.NestedMap)
  # Make a copy of carry_init structure.
  carry_init = tf.nest.map_structure(lambda x: x, carry_init)
  # "carry" will be augmented with the following three tensors, so make sure
  # they don't already exist in the NestedMap.
  assert 'time_step' not in carry_init
  assert 'prng_key' not in carry_init
  assert 'global_step' not in carry_init
  def custom_policy(checkpoint_policy: AutodiffCheckpointType):
    # Maps the enum to a jax.checkpoint_policies callable.
    # TODO(zhangqiaorjc): Configure custom checkpoint policy in expt config
    # without introducing enum.
    if checkpoint_policy == AutodiffCheckpointType.SAVE_EVERYTHING:
      return jax.checkpoint_policies.everything_saveable
    if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_ONLY:
      return jax.checkpoint_policies.checkpoint_dots
    if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_WITH_NO_BATCH_DIM:
      return jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims
    if checkpoint_policy == AutodiffCheckpointType.SAVE_QKV_OUT_PROJ:
      return jax.checkpoint_policies.save_only_these_names(
          'combined_qkv_proj', 'out_proj')
    if checkpoint_policy == AutodiffCheckpointType.SAVE_CONTEXT:
      return jax.checkpoint_policies.save_only_these_names('context')
    if checkpoint_policy == AutodiffCheckpointType.SAVE_OUT_PROJ:
      return jax.checkpoint_policies.save_only_these_names('out_proj')
    if checkpoint_policy == AutodiffCheckpointType.SAVE_CONTEXT_AND_OUT_PROJ:
      return jax.checkpoint_policies.save_only_these_names(
          'context', 'out_proj')
    if checkpoint_policy == AutodiffCheckpointType.SAVE_DOT_FOR_MLPERF_200B:
      return jax.checkpoint_policies.save_only_these_names(
          'combined_qkv_proj', 'query_proj', 'value_proj', 'key_proj',
          'context', 'out_proj')
    assert checkpoint_policy == AutodiffCheckpointType.SAVE_NOTHING
    return jax.checkpoint_policies.nothing_saveable
  @functools.partial(
      ad_checkpoint.checkpoint,
      prevent_cse=False,
      policy=custom_policy(checkpoint_policy))
  def fn_wrap(carry, xs_t):
    # carry is augmented with time_step, prng_key, global_step three additional
    # tensors to make fn_wrap fully functional.
    # Start a new prng_key branch that also depends on the time step.
    prng_key_t = jax.random.fold_in(carry.prng_key, carry.time_step)
    with base_layer.JaxContext.new_context(
        prng_key=prng_key_t, global_step=carry.global_step):
      carry_new, ys_t = fn(carry, xs_t)
      carry_new.time_step = carry.time_step + 1
      # copy over prng_key and global_step
      carry_new.prng_key = carry.prng_key
      carry_new.global_step = carry.global_step
      tf.nest.assert_same_structure(carry_new, carry)
      # Per-step summaries are stacked by lax.scan along with ys.
      summaries = base_layer.all_summaries()
      return carry_new, (ys_t, summaries)
  # The initial time step.
  time_step = jnp.array(0, dtype=jnp.uint32)
  prng_key = base_layer.next_prng_key()
  global_step = base_layer.cur_global_step()
  carry_init.time_step = time_step
  carry_init.prng_key = prng_key
  carry_init.global_step = global_step
  carry_final, (ys, summaries) = jax.lax.scan(fn_wrap, carry_init, xs)
  # Strip the internal bookkeeping fields before returning to the caller.
  del carry_final.time_step
  del carry_final.global_step
  del carry_final.prng_key
  return carry_final, ys, summaries
|
tensorflow/lingvo
|
lingvo/jax/layers/recurrent.py
|
Python
|
apache-2.0
| 17,731
|
# Python < 3 needs this: coding=utf-8
import pytest
from pybind11_tests import builtin_casters as m
from pybind11_tests import UserType, IncType
def test_simple_string():
    """A plain ASCII string survives a const char * round trip unchanged."""
    text = "const char *"
    assert m.string_roundtrip(text) == text
def test_unicode_conversion():
    """Tests unicode conversion and error reporting."""
    # Well-formed strings in each encoding decode to the expected text.
    assert m.good_utf8_string() == u"Say utf8‽ 🎂 𝐀"
    assert m.good_utf16_string() == u"b‽🎂𝐀z"
    assert m.good_utf32_string() == u"a𝐀🎂‽z"
    assert m.good_wchar_string() == u"a⸘𝐀z"
    # Malformed byte sequences must surface as UnicodeDecodeError.
    with pytest.raises(UnicodeDecodeError):
        m.bad_utf8_string()
    with pytest.raises(UnicodeDecodeError):
        m.bad_utf16_string()
    # These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)
    if hasattr(m, "bad_utf32_string"):
        with pytest.raises(UnicodeDecodeError):
            m.bad_utf32_string()
    if hasattr(m, "bad_wchar_string"):
        with pytest.raises(UnicodeDecodeError):
            m.bad_wchar_string()
    # Single-character returns for char, char16_t, char32_t and wchar_t.
    assert m.u8_Z() == 'Z'
    assert m.u8_eacute() == u'é'
    assert m.u16_ibang() == u'‽'
    assert m.u32_mathbfA() == u'𝐀'
    assert m.wchar_heart() == u'♥'
def test_single_char_arguments():
    """Tests failures for passing invalid inputs to char-accepting functions"""
    def toobig_message(r):
        # Error text produced by the caster for an out-of-range code point.
        return "Character code point not in range({0:#x})".format(r)
    toolong_message = "Expected a character, but multi-character string found"
    # char: only code points that fit in a single char are accepted.
    assert m.ord_char(u'a') == 0x61  # simple ASCII
    assert m.ord_char(u'é') == 0xE9  # requires 2 bytes in utf-8, but can be stuffed in a char
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char(u'Ā') == 0x100  # requires 2 bytes, doesn't fit in a char
    assert str(excinfo.value) == toobig_message(0x100)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char(u'ab')
    assert str(excinfo.value) == toolong_message
    # char16_t: BMP code points only; astral characters need a surrogate pair.
    assert m.ord_char16(u'a') == 0x61
    assert m.ord_char16(u'é') == 0xE9
    assert m.ord_char16(u'Ā') == 0x100
    assert m.ord_char16(u'‽') == 0x203d
    assert m.ord_char16(u'♥') == 0x2665
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16(u'🎂') == 0x1F382  # requires surrogate pair
    assert str(excinfo.value) == toobig_message(0x10000)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16(u'aa')
    assert str(excinfo.value) == toolong_message
    # char32_t: any code point, including astral-plane ones, fits.
    assert m.ord_char32(u'a') == 0x61
    assert m.ord_char32(u'é') == 0xE9
    assert m.ord_char32(u'Ā') == 0x100
    assert m.ord_char32(u'‽') == 0x203d
    assert m.ord_char32(u'♥') == 0x2665
    assert m.ord_char32(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char32(u'aa')
    assert str(excinfo.value) == toolong_message
    # wchar_t: behavior depends on the platform's wchar_t width (2 or 4).
    assert m.ord_wchar(u'a') == 0x61
    assert m.ord_wchar(u'é') == 0xE9
    assert m.ord_wchar(u'Ā') == 0x100
    assert m.ord_wchar(u'‽') == 0x203d
    assert m.ord_wchar(u'♥') == 0x2665
    if m.wchar_size == 2:
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_wchar(u'🎂') == 0x1F382  # requires surrogate pair
        assert str(excinfo.value) == toobig_message(0x10000)
    else:
        assert m.ord_wchar(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_wchar(u'aa')
    assert str(excinfo.value) == toolong_message
def test_bytes_to_string():
    """Tests the ability to pass bytes to C++ string-accepting functions. Note that this is
    one-way: the only way to return bytes to Python is via the pybind11::bytes class."""
    # Issue #816
    import sys
    # On Python 2 `bytes` is `str`, so bytes("hi") already yields a byte
    # string. On Python 3, `str(...)` would produce *text*, not bytes, and
    # this test would no longer exercise the bytes -> C++ path at all, so
    # encode to real bytes instead.
    byte = bytes if sys.version_info[0] < 3 else (lambda s: s.encode("utf8"))
    assert m.strlen(byte("hi")) == 2
    assert m.string_length(byte("world")) == 5
    assert m.string_length(byte("a\x00b")) == 3
    assert m.strlen(byte("a\x00b")) == 1  # C-string limitation
    # passing in a utf8 encoded string should work
    assert m.string_length(u'💩'.encode("utf8")) == 4
@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no <string_view>")
def test_string_view(capture):
    """Tests support for C++17 string_view arguments and return values"""
    # `capture` is the pybind11 conftest fixture that records C++-side stdout.
    # Argument decoding: per-code-unit values in each string_view width.
    assert m.string_view_chars("Hi") == [72, 105]
    assert m.string_view_chars("Hi 🎂") == [72, 105, 32, 0xf0, 0x9f, 0x8e, 0x82]
    assert m.string_view16_chars("Hi 🎂") == [72, 105, 32, 0xd83c, 0xdf82]
    assert m.string_view32_chars("Hi 🎂") == [72, 105, 32, 127874]
    # Return values decode back into Python text.
    assert m.string_view_return() == "utf8 secret 🎂"
    assert m.string_view16_return() == "utf16 secret 🎂"
    assert m.string_view32_return() == "utf32 secret 🎂"
    # Printing: each line shows the text and its length in code units.
    with capture:
        m.string_view_print("Hi")
        m.string_view_print("utf8 🎂")
        m.string_view16_print("utf16 🎂")
        m.string_view32_print("utf32 🎂")
    assert capture == """
        Hi 2
        utf8 🎂 9
        utf16 🎂 8
        utf32 🎂 7
    """
    with capture:
        m.string_view_print("Hi, ascii")
        m.string_view_print("Hi, utf8 🎂")
        m.string_view16_print("Hi, utf16 🎂")
        m.string_view32_print("Hi, utf32 🎂")
    assert capture == """
        Hi, ascii 9
        Hi, utf8 🎂 13
        Hi, utf16 🎂 12
        Hi, utf32 🎂 11
    """
def test_integer_casting():
    """Issue #929 - out-of-range integer values shouldn't be accepted"""
    import sys
    # In-range values round-trip through the C++ integer casters.
    assert m.i32_str(-1) == "-1"
    assert m.i64_str(-1) == "-1"
    assert m.i32_str(2000000000) == "2000000000"
    assert m.u32_str(2000000000) == "2000000000"
    # Python 2 has a separate `long` type; exercise it there explicitly.
    if sys.version_info < (3,):
        assert m.i32_str(long(-1)) == "-1"  # noqa: F821 undefined name 'long'
        assert m.i64_str(long(-1)) == "-1"  # noqa: F821 undefined name 'long'
        assert m.i64_str(long(-999999999999)) == "-999999999999"  # noqa: F821 undefined name
        assert m.u64_str(long(999999999999)) == "999999999999"  # noqa: F821 undefined name 'long'
    else:
        assert m.i64_str(-999999999999) == "-999999999999"
        assert m.u64_str(999999999999) == "999999999999"
    # Out-of-range values must be rejected, not silently wrapped.
    with pytest.raises(TypeError) as excinfo:
        m.u32_str(-1)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.u64_str(-1)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.i32_str(-3000000000)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.i32_str(3000000000)
    assert "incompatible function arguments" in str(excinfo.value)
    if sys.version_info < (3,):
        with pytest.raises(TypeError) as excinfo:
            m.u32_str(long(-1))  # noqa: F821 undefined name 'long'
        assert "incompatible function arguments" in str(excinfo.value)
        with pytest.raises(TypeError) as excinfo:
            m.u64_str(long(-1))  # noqa: F821 undefined name 'long'
        assert "incompatible function arguments" in str(excinfo.value)
def test_tuple(doc):
    """std::pair <-> tuple & std::tuple <-> tuple"""
    # `doc` is the pybind11 conftest fixture that normalizes docstrings.
    assert m.pair_passthrough((True, "test")) == ("test", True)
    assert m.tuple_passthrough((True, "test", 5)) == (5, "test", True)
    # Any sequence can be cast to a std::pair or std::tuple
    assert m.pair_passthrough([True, "test"]) == ("test", True)
    assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True)
    assert m.empty_tuple() == ()
    # Generated signatures spell out the element types.
    assert doc(m.pair_passthrough) == """
        pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]
        Return a pair in reversed order
    """
    assert doc(m.tuple_passthrough) == """
        tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
        Return a triple in reversed order
    """
    # Both rvalue and lvalue pairs/tuples (and nested ones) convert.
    assert m.rvalue_pair() == ("rvalue", "rvalue")
    assert m.lvalue_pair() == ("lvalue", "lvalue")
    assert m.rvalue_tuple() == ("rvalue", "rvalue", "rvalue")
    assert m.lvalue_tuple() == ("lvalue", "lvalue", "lvalue")
    assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue")))
    assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue")))
def test_builtins_cast_return_none():
    """Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
    null_returners = (
        m.return_none_string,
        m.return_none_char,
        m.return_none_bool,
        m.return_none_int,
        m.return_none_float,
    )
    for returner in null_returners:
        assert returner() is None
def test_none_deferred():
    """None passed as various argument types should defer to other overloads"""
    # A non-None argument takes the first overload (returns falsy marker) ...
    assert not m.defer_none_cstring("abc")
    # ... while None falls through to the None-accepting overload.
    assert m.defer_none_cstring(None)
    assert not m.defer_none_custom(UserType())
    assert m.defer_none_custom(None)
    # void* style arguments accept None directly without deferring.
    assert m.nodefer_none_void(None)
def test_void_caster():
    """std::nullptr_t round-trips as None in both directions."""
    loaded = m.load_nullptr_t(None)
    cast_back = m.cast_nullptr_t()
    assert loaded is None
    assert cast_back is None
def test_reference_wrapper():
    """std::reference_wrapper for builtin and user types"""
    assert m.refwrap_builtin(42) == 420
    assert m.refwrap_usertype(UserType(42)) == 42
    # None is not a valid referent for a reference_wrapper argument.
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_builtin(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_usertype(None)
    assert "incompatible function arguments" in str(excinfo.value)
    # copy=True: each call returns freshly copied (independent) objects.
    a1 = m.refwrap_list(copy=True)
    a2 = m.refwrap_list(copy=True)
    assert [x.value for x in a1] == [2, 3]
    assert [x.value for x in a2] == [2, 3]
    assert not a1[0] is a2[0] and not a1[1] is a2[1]
    # copy=False: both calls reference the same underlying C++ objects.
    b1 = m.refwrap_list(copy=False)
    b2 = m.refwrap_list(copy=False)
    assert [x.value for x in b1] == [1, 2]
    assert [x.value for x in b2] == [1, 2]
    assert b1[0] is b2[0] and b1[1] is b2[1]
    assert m.refwrap_iiw(IncType(5)) == 5
    assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10]
def test_complex_cast():
    """std::complex casts"""
    # Real input formats as a bare value; complex input as an (re, im) pair.
    expected = {1: "1.0", 2j: "(0.0, 2.0)"}
    for value, formatted in expected.items():
        assert m.complex_cast(value) == formatted
def test_bool_caster():
    """Test bool caster implicit conversions."""
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    def require_implicit(v):
        # v must be rejected by the noconvert overload (only implicit works).
        pytest.raises(TypeError, noconvert, v)
    def cant_convert(v):
        # v must be rejected even with implicit conversion enabled.
        pytest.raises(TypeError, convert, v)
    # straight up bool
    assert convert(True) is True
    assert convert(False) is False
    assert noconvert(True) is True
    assert noconvert(False) is False
    # None requires implicit conversion
    require_implicit(None)
    assert convert(None) is False
    class A(object):
        def __init__(self, x):
            self.x = x
        # __nonzero__ is the Python 2 spelling, __bool__ the Python 3 one.
        def __nonzero__(self):
            return self.x
        def __bool__(self):
            return self.x
    class B(object):
        pass
    # Arbitrary objects are not accepted
    cant_convert(object())
    cant_convert(B())
    # Objects with __nonzero__ / __bool__ defined can be converted
    require_implicit(A(True))
    assert convert(A(True)) is True
    assert convert(A(False)) is False
# NOTE(review): pytest.requires_numpy is presumably a marker registered by
# pybind11's conftest (skips when numpy is absent) — confirm against conftest.
@pytest.requires_numpy
def test_numpy_bool():
    """np.bool_ converts like a native bool, even without implicit conversion."""
    import numpy as np
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    # np.bool_ is not considered implicit
    assert convert(np.bool_(True)) is True
    assert convert(np.bool_(False)) is False
    assert noconvert(np.bool_(True)) is True
    assert noconvert(np.bool_(False)) is False
|
Lemma1/MAC-POSTS
|
src/pybinder/pybind11/tests/test_builtin_casters.py
|
Python
|
mit
| 11,540
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class DictionaryOperations(object):
"""DictionaryOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
    def get_null(
            self, custom_headers=None, raw=False, **operation_config):
        """Get null dictionary value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: dict of int on success, or the raw ClientRawResponse if
         raw=true
        :rtype: dict or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
        """
        # Construct URL
        url = '/dictionary/null'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # '{int}' = deserialize the JSON body as a dict of int values.
            deserialized = self._deserialize('{int}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get_empty(
            self, custom_headers=None, raw=False, **operation_config):
        """Get empty dictionary value {}.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: dict of int on success, or the raw ClientRawResponse if
         raw=true
        :rtype: dict or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
        """
        # Construct URL
        url = '/dictionary/empty'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # '{int}' = deserialize the JSON body as a dict of int values.
            deserialized = self._deserialize('{int}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def put_empty(
            self, array_body, custom_headers=None, raw=False, **operation_config):
        """Set dictionary value empty {}.

        :param array_body: dictionary of str values to send as the request
         body
        :type array_body: dict
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None, or the raw ClientRawResponse if raw=true
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
        """
        # Construct URL
        url = '/dictionary/empty'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body
        # '{str}' = serialize the body as a dict of str values.
        body_content = self._serialize.body(array_body, '{str}')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get_null_value(
            self, custom_headers=None, raw=False, **operation_config):
        """Get Dictionary with null value.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: dict of str on success, or the raw ClientRawResponse if
         raw=true
        :rtype: dict or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
        """
        # Construct URL
        url = '/dictionary/nullvalue'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # '{str}' = deserialize the JSON body as a dict of str values.
            deserialized = self._deserialize('{str}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get_null_key(
            self, custom_headers=None, raw=False, **operation_config):
        """Get Dictionary with null key.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: dict of str on success, or the raw ClientRawResponse if
         raw=true
        :rtype: dict or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises:
         :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
        """
        # Construct URL
        url = '/dictionary/nullkey'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            # '{str}' = deserialize the JSON body as a dict of str values.
            deserialized = self._deserialize('{str}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_empty_string_key(
        self, custom_headers=None, raw=False, **operation_config):
    """Get Dictionary with key as empty string.

    Issues a GET against /dictionary/keyemptystring and deserializes the
    JSON body as a dict of str.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/keyemptystring', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{str}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_invalid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get invalid Dictionary value.

    Issues a GET against /dictionary/invalid and deserializes the JSON
    body as a dict of str.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/invalid', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{str}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_boolean_tfft(
        self, custom_headers=None, raw=False, **operation_config):
    """Get boolean dictionary value {"0": true, "1": false, "2": false,
    "3": true }.

    Issues a GET against /dictionary/prim/boolean/tfft and deserializes
    the JSON body as a dict of bool.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/boolean/tfft', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{bool}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_boolean_tfft(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value empty {"0": true, "1": false, "2": false,
    "3": true }.

    Serializes *array_body* as a JSON map of bool and PUTs it to
    /dictionary/prim/boolean/tfft.

    :param array_body: dictionary of booleans to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of bool values.
    body_content = self._serialize.body(array_body, '{bool}')

    request = self._client.put('/dictionary/prim/boolean/tfft', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_boolean_invalid_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get boolean dictionary value {"0": true, "1": null, "2": false }.

    Issues a GET against /dictionary/prim/boolean/true.null.false and
    deserializes the JSON body as a dict of bool.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/boolean/true.null.false', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{bool}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_boolean_invalid_string(
        self, custom_headers=None, raw=False, **operation_config):
    """Get boolean dictionary value '{"0": true, "1": "boolean", "2":
    false}'.

    Issues a GET against /dictionary/prim/boolean/true.boolean.false and
    deserializes the JSON body as a dict of bool.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/boolean/true.boolean.false', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{bool}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_integer_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get integer dictionary value {"0": 1, "1": -1, "2": 3, "3": 300}.

    Issues a GET against /dictionary/prim/integer/1.-1.3.300 and
    deserializes the JSON body as a dict of int.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/integer/1.-1.3.300', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{int}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_integer_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value empty {"0": 1, "1": -1, "2": 3, "3": 300}.

    Serializes *array_body* as a JSON map of int and PUTs it to
    /dictionary/prim/integer/1.-1.3.300.

    :param array_body: dictionary of integers to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of int values.
    body_content = self._serialize.body(array_body, '{int}')

    request = self._client.put('/dictionary/prim/integer/1.-1.3.300', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_int_invalid_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get integer dictionary value {"0": 1, "1": null, "2": 0}.

    Issues a GET against /dictionary/prim/integer/1.null.zero and
    deserializes the JSON body as a dict of int.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/integer/1.null.zero', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{int}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_int_invalid_string(
        self, custom_headers=None, raw=False, **operation_config):
    """Get integer dictionary value {"0": 1, "1": "integer", "2": 0}.

    Issues a GET against /dictionary/prim/integer/1.integer.0 and
    deserializes the JSON body as a dict of int.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/integer/1.integer.0', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{int}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_long_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get integer dictionary value {"0": 1, "1": -1, "2": 3, "3": 300}.

    Issues a GET against /dictionary/prim/long/1.-1.3.300 and
    deserializes the JSON body as a dict of long.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/long/1.-1.3.300', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{long}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_long_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value empty {"0": 1, "1": -1, "2": 3, "3": 300}.

    Serializes *array_body* as a JSON map of long and PUTs it to
    /dictionary/prim/long/1.-1.3.300.

    :param array_body: dictionary of long integers to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of long values.
    body_content = self._serialize.body(array_body, '{long}')

    request = self._client.put('/dictionary/prim/long/1.-1.3.300', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_long_invalid_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get long dictionary value {"0": 1, "1": null, "2": 0}.

    Issues a GET against /dictionary/prim/long/1.null.zero and
    deserializes the JSON body as a dict of long.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/long/1.null.zero', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{long}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_long_invalid_string(
        self, custom_headers=None, raw=False, **operation_config):
    """Get long dictionary value {"0": 1, "1": "integer", "2": 0}.

    Issues a GET against /dictionary/prim/long/1.integer.0 and
    deserializes the JSON body as a dict of long.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/long/1.integer.0', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{long}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_float_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get float dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.

    Issues a GET against /dictionary/prim/float/0--0.01-1.2e20 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/float/0--0.01-1.2e20', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_float_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.

    Serializes *array_body* as a JSON map of float and PUTs it to
    /dictionary/prim/float/0--0.01-1.2e20.

    :param array_body: dictionary of floats to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of float values.
    body_content = self._serialize.body(array_body, '{float}')

    request = self._client.put('/dictionary/prim/float/0--0.01-1.2e20', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_float_invalid_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get float dictionary value {"0": 0.0, "1": null, "2": 1.2e20}.

    Issues a GET against /dictionary/prim/float/0.0-null-1.2e20 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/float/0.0-null-1.2e20', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_float_invalid_string(
        self, custom_headers=None, raw=False, **operation_config):
    """Get boolean dictionary value {"0": 1.0, "1": "number", "2": 0.0}.

    Issues a GET against /dictionary/prim/float/1.number.0 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/float/1.number.0', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_double_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get float dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.

    Issues a GET against /dictionary/prim/double/0--0.01-1.2e20 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/double/0--0.01-1.2e20', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_double_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value {"0": 0, "1": -0.01, "2": 1.2e20}.

    Serializes *array_body* as a JSON map of float and PUTs it to
    /dictionary/prim/double/0--0.01-1.2e20.

    :param array_body: dictionary of doubles to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of float values.
    body_content = self._serialize.body(array_body, '{float}')

    request = self._client.put('/dictionary/prim/double/0--0.01-1.2e20', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_double_invalid_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get float dictionary value {"0": 0.0, "1": null, "2": 1.2e20}.

    Issues a GET against /dictionary/prim/double/0.0-null-1.2e20 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/double/0.0-null-1.2e20', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_double_invalid_string(
        self, custom_headers=None, raw=False, **operation_config):
    """Get boolean dictionary value {"0": 1.0, "1": "number", "2": 0.0}.

    Issues a GET against /dictionary/prim/double/1.number.0 and
    deserializes the JSON body as a dict of float.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/double/1.number.0', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{float}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_string_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get string dictionary value {"0": "foo1", "1": "foo2", "2":
    "foo3"}.

    Issues a GET against /dictionary/prim/string/foo1.foo2.foo3 and
    deserializes the JSON body as a dict of str.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: dict or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    # Fixed endpoint; no query parameters are required.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    request = self._client.get('/dictionary/prim/string/foo1.foo2.foo3', {})
    response = self._client.send(request, headers, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    deserialized = self._deserialize('{str}', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_string_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Set dictionary value {"0": "foo1", "1": "foo2", "2": "foo3"}.

    Serializes *array_body* as a JSON map of str and PUTs it to
    /dictionary/prim/string/foo1.foo2.foo3.

    :param array_body: dictionary of strings to upload.
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload as a JSON map of str values.
    body_content = self._serialize.body(array_body, '{str}')

    request = self._client.put('/dictionary/prim/string/foo1.foo2.foo3', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)

    # Any non-200 status is surfaced as a service error.
    if response.status_code != 200:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_string_with_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get string dictionary value {"0": "foo", "1": null, "2": "foo2"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/string/foo.null.foo2'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_string_with_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get string dictionary value {"0": "foo", "1": 123, "2": "foo2"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/string/foo.123.foo2'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{str}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get integer dictionary value {"0": "2000-12-01", "1": "1980-01-02",
"2": "1492-10-12"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "2000-12-01", "1": "1980-01-02", "2":
"1492-10-12"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{date}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_date_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2012-01-01", "1": null, "2":
"1776-07-04"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_invalid_chars(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2011-03-22", "1": "date"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date/invalidchars'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{date}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get date-time dictionary value {"0": "2000-12-01t00:00:01z", "1":
"1980-01-02T00:11:35+01:00", "2": "1492-10-12T10:15:01-08:00"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_time_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "2000-12-01t00:00:01z", "1":
"1980-01-02T00:11:35+01:00", "2": "1492-10-12T10:15:01-08:00"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{iso-8601}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_date_time_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2000-12-01t00:00:01z", "1": null}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_invalid_chars(
self, custom_headers=None, raw=False, **operation_config):
"""Get date dictionary value {"0": "2000-12-01t00:00:01z", "1":
"date-time"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time/invalidchars'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{iso-8601}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_date_time_rfc1123_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get date-time-rfc1123 dictionary value {"0": "Fri, 01 Dec 2000
00:00:01 GMT", "1": "Wed, 02 Jan 1980 00:11:35 GMT", "2": "Wed, 12
Oct 1492 10:15:01 GMT"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time-rfc1123/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{rfc-1123}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_date_time_rfc1123_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value empty {"0": "Fri, 01 Dec 2000 00:00:01 GMT", "1":
"Wed, 02 Jan 1980 00:11:35 GMT", "2": "Wed, 12 Oct 1492 10:15:01
GMT"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/date-time-rfc1123/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{rfc-1123}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_duration_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get duration dictionary value {"0": "P123DT22H14M12.011S", "1":
"P5DT1H0M0S"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/duration/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{duration}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_duration_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Set dictionary value {"0": "P123DT22H14M12.011S", "1": "P5DT1H0M0S"}.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/duration/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{duration}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_byte_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get byte dictionary value {"0": hex(FF FF FF FA), "1": hex(01 02 03),
"2": hex (25, 29, 43)} with each item encoded in base64.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/byte/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bytearray}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_byte_valid(
self, array_body, custom_headers=None, raw=False, **operation_config):
"""Put the dictionary value {"0": hex(FF FF FF FA), "1": hex(01 02 03),
"2": hex (25, 29, 43)} with each elementencoded in base 64.
:param array_body:
:type array_body: dict
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/byte/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(array_body, '{bytearray}')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_byte_invalid_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get byte dictionary value {"0": hex(FF FF FF FA), "1": null} with the
first item base64 encoded.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/byte/invalidnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{bytearray}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_base64_url(
self, custom_headers=None, raw=False, **operation_config):
"""Get base64url dictionary value {"0": "a string that gets encoded with
base64url", "1": "test string", "2": "Lorem ipsum"}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/prim/base64url/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{base64}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type null value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/complex/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get empty dictionary of complex type {}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/complex/empty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_item_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with null item {"0": {"integer": 1,
"string": "2"}, "1": null, "2": {"integer": 5, "string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/complex/itemnull'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_item_empty(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with empty item {"0": {"integer": 1,
"string": "2"}, "1:" {}, "2": {"integer": 5, "string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/complex/itemempty'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_complex_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Get dictionary of complex type with {"0": {"integer": 1, "string":
"2"}, "1": {"integer": 3, "string": "4"}, "2": {"integer": 5,
"string": "6"}}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: dict
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
"""
# Construct URL
url = '/dictionary/complex/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('{Widget}', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_complex_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Put a dictionary of complex type with values {"0": {"integer": 1,
    "string": "2"}, "1": {"integer": 3, "string": "4"}, "2": {"integer":
    5, "string": "6"}}.

    :param array_body: dictionary of Widget values to store
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: None, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload and issue the PUT; no query parameters are used.
    body_content = self._serialize.body(array_body, '{Widget}')
    request = self._client.put('/dictionary/complex/valid', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_array_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a null array.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/array/null', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{[str]}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_array_empty(
        self, custom_headers=None, raw=False, **operation_config):
    """Get an empty dictionary {}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/array/empty', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{[str]}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_array_item_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of arrays of strings {"0": ["1", "2", "3"], "1":
    null, "2": ["7", "8", "9"]}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/array/itemnull', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{[str]}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_array_item_empty(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of arrays of strings {"0": ["1", "2", "3"], "1":
    [], "2": ["7", "8", "9"]}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/array/itemempty', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{[str]}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_array_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of arrays of strings {"0": ["1", "2", "3"], "1":
    ["4", "5", "6"], "2": ["7", "8", "9"]}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/array/valid', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{[str]}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_array_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Put a dictionary of arrays of strings {"0": ["1", "2", "3"], "1":
    ["4", "5", "6"], "2": ["7", "8", "9"]}.

    :param array_body: dictionary of string lists to store
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: None, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload and issue the PUT; no query parameters are used.
    body_content = self._serialize.body(array_body, '{[str]}')
    request = self._client.put('/dictionary/array/valid', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
def get_dictionary_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of dictionaries with value null.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/dictionary/null', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{{str}}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_dictionary_empty(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of dictionaries of type <string, string> with
    value {}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/dictionary/empty', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{{str}}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_dictionary_item_null(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of dictionaries of type <string, string> with
    value {"0": {"1": "one", "2": "two", "3": "three"}, "1": null, "2":
    {"7": "seven", "8": "eight", "9": "nine"}}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/dictionary/itemnull', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{{str}}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_dictionary_item_empty(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of dictionaries of type <string, string> with
    value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {}, "2":
    {"7": "seven", "8": "eight", "9": "nine"}}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/dictionary/itemempty', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{{str}}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_dictionary_valid(
        self, custom_headers=None, raw=False, **operation_config):
    """Get a dictionary of dictionaries of type <string, string> with
    value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {"4":
    "four", "5": "five", "6": "six"}, "2": {"7": "seven", "8": "eight",
    "9": "nine"}}.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: dict, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # GET with no query parameters; non-200 is a service error.
    request = self._client.get('/dictionary/dictionary/valid', {})
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('{{str}}', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def put_dictionary_valid(
        self, array_body, custom_headers=None, raw=False, **operation_config):
    """Put a dictionary of dictionaries of type <string, string> with
    value {"0": {"1": "one", "2": "two", "3": "three"}, "1": {"4":
    "four", "5": "five", "6": "six"}, "2": {"7": "seven", "8": "eight",
    "9": "nine"}}.

    :param array_body: dictionary of string dictionaries to store
    :type array_body: dict
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: return the ClientRawResponse wrapper as well
    :rtype: None, or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorException<Fixtures.AcceptanceTestsBodyDictionary.models.ErrorException>`
    """
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the payload and issue the PUT; no query parameters are used.
    body_content = self._serialize.body(array_body, '{{str}}')
    request = self._client.put('/dictionary/dictionary/valid', {})
    response = self._client.send(
        request, headers, body_content, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
|
fhoring/autorest
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyDictionary/autorestswaggerbatdictionaryservice/operations/dictionary_operations.py
|
Python
|
mit
| 110,391
|
# import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from netCDF4 import Dataset
from postladim import ParticleFile
# ---------------
# User settings
# ---------------

# Files
particle_file = "streak.nc"              # LADiM particle output file
grid_file = "../data/ocean_avg_0014.nc"  # ROMS file providing the grid

# Subgrid definition: grid-index window [i0, i1) x [j0, j1) to plot
i0, i1 = 100, 140
j0, j1 = 85, 130
# ----------------

# ROMS grid, plot domain: read bathymetry, land mask and lon/lat
# for the subgrid only.
with Dataset(grid_file) as f0:
    H = f0.variables["h"][j0:j1, i0:i1]
    M = f0.variables["mask_rho"][j0:j1, i0:i1]
    lon = f0.variables["lon_rho"][j0:j1, i0:i1]
    lat = f0.variables["lat_rho"][j0:j1, i0:i1]

# Cell centers and boundaries (boundaries offset half a cell for pcolormesh)
Xcell = np.arange(i0, i1)
Ycell = np.arange(j0, j1)
Xb = np.arange(i0 - 0.5, i1)
Yb = np.arange(j0 - 0.5, j1)
def age(t):
    """Particle ages, in days, at time frame *t* of the particle file."""
    # Elapsed time since each particle's release, converted to days.
    one_day = np.timedelta64(1, "D")
    released = pf.release_time[pf.pid[t]]
    return (pf.time[t] - released) / one_day
# Open the particle file
pf = ParticleFile(particle_file)
num_times = pf.num_times

# Set up the plot area
fig = plt.figure(figsize=(12, 10))
ax = plt.axes(xlim=(i0 + 1, i1 - 1), ylim=(j0 + 1, j1 - 1), aspect="equal")

# Background bathymetry
cmap = plt.get_cmap("Blues")
ax.contourf(Xcell, Ycell, H, cmap=cmap, alpha=0.3)

# Lon/lat lines
ax.contour(Xcell, Ycell, lat, levels=range(57, 64), colors="black", linestyles=":")
ax.contour(Xcell, Ycell, lon, levels=range(-4, 10, 2), colors="black", linestyles=":")

# Landmask: constant colour where mask_rho == 0
constmap = plt.matplotlib.colors.ListedColormap([0.2, 0.6, 0.4])
M = np.ma.masked_where(M > 0, M)
plt.pcolormesh(Xb, Yb, M, cmap=constmap)

# Initial scatter plot, colour = particle age
# (the unused ``pids = pf["pid"][0]`` read has been removed)
X, Y = pf.position(0)
C = age(0)
vmax = pf.num_times / 6  # Maximum particle age in days
pdistr = ax.scatter(X, Y, c=C, vmin=0, vmax=vmax, cmap=plt.get_cmap("plasma_r"))
cb = plt.colorbar(pdistr)
cb.set_label("Particle age [days]", fontsize=14)
timestamp = ax.text(0.01, 0.97, pf.time(0), fontsize=15, transform=ax.transAxes)
# Update function
def animate(t):
    """Refresh scatter positions, colours and timestamp for frame *t*."""
    x, y = pf.position(t)
    pdistr.set_offsets(np.column_stack((x, y)))
    # Recolour by particle age in days
    pdistr.set_array(age(t))
    timestamp.set_text(pf.time(t))
    return pdistr, timestamp
# Do the animation.  Keep a reference to the FuncAnimation object so it is
# not garbage-collected while the window is open.
anim = FuncAnimation(
    fig, animate, frames=num_times, interval=20,
    repeat=True, repeat_delay=500, blit=True)

plt.show()
|
bjornaa/ladim
|
examples/streak/animate.py
|
Python
|
mit
| 2,388
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from invenio_workflows.errors import WorkflowsError
class BadGatewayError(WorkflowsError):
    """Error representing a network (bad gateway) failure during a workflow."""
class DownloadError(WorkflowsError):
    """Error representing a failed file download in a workflow."""
class MergeError(WorkflowsError):
    """Error representing a failed record merge in a workflow."""
class CallbackError(WorkflowsError):
    """Base exception for workflow-callback failures.

    Subclasses override the class attributes to customize the HTTP
    status code and the JSON payload produced by :meth:`to_dict`.
    """

    # HTTP status code returned to the caller.
    code = 400
    # Machine-readable error identifier.
    error_code = 'CALLBACK_ERROR'
    # Optional per-field error details.
    errors = None
    # Human-readable message.
    message = 'Workflow callback error.'
    # Optional offending workflow data.
    workflow = None

    def to_dict(self):
        """Serialize the exception into a JSON-ready dictionary."""
        # Preserve the historical key insertion order.
        payload = {'error_code': self.error_code}
        if self.errors is not None:
            payload['errors'] = self.errors
        payload['message'] = self.message
        if self.workflow is not None:
            payload['workflow'] = self.workflow
        return payload
class CallbackMalformedError(CallbackError):
    """Raised when a callback request payload cannot be understood."""

    error_code = 'MALFORMED'
    message = 'The workflow request is malformed.'

    def __init__(self, errors=None, **kwargs):
        """Record the per-field *errors*, if any, for :meth:`to_dict`."""
        super(CallbackMalformedError, self).__init__(**kwargs)
        self.errors = errors
class CallbackWorkflowNotFoundError(CallbackError):
    """Raised when the requested workflow id does not exist."""

    code = 404
    error_code = 'WORKFLOW_NOT_FOUND'

    def __init__(self, workflow_id, **kwargs):
        """Build a message naming the missing workflow."""
        super(CallbackWorkflowNotFoundError, self).__init__(**kwargs)
        template = 'The workflow with id "{}" was not found.'
        self.message = template.format(workflow_id)
class CallbackValidationError(CallbackError):
    """Raised when the submitted workflow data fails validation."""

    error_code = 'VALIDATION_ERROR'
    message = 'Validation error.'

    def __init__(self, workflow_data, **kwargs):
        """Attach the offending *workflow_data* for the error payload."""
        super(CallbackValidationError, self).__init__(**kwargs)
        self.workflow = workflow_data
class CallbackWorkflowNotInValidationError(CallbackError):
    """Raised when a workflow is not in the validation-error state."""

    error_code = 'WORKFLOW_NOT_IN_ERROR_STATE'

    def __init__(self, workflow_id, **kwargs):
        """Build a message naming the workflow in the wrong state."""
        super(CallbackWorkflowNotInValidationError, self).__init__(**kwargs)
        template = 'Workflow {} is not in validation error state.'
        self.message = template.format(workflow_id)
class CallbackWorkflowNotInMergeState(CallbackError):
    """Raised when a workflow is not in the merge state."""

    error_code = 'WORKFLOW_NOT_IN_MERGE_STATE'

    def __init__(self, workflow_id, **kwargs):
        """Build a message naming the workflow in the wrong state."""
        super(CallbackWorkflowNotInMergeState, self).__init__(**kwargs)
        template = 'Workflow {} is not in merge state.'
        self.message = template.format(workflow_id)
class CallbackWorkflowNotInWaitingEditState(CallbackError):
    """Raised when a workflow is not waiting for curation."""

    error_code = 'WORKFLOW_NOT_IN_WAITING_FOR_CURATION_STATE'

    def __init__(self, workflow_id, **kwargs):
        """Build a message naming the workflow in the wrong state."""
        super(CallbackWorkflowNotInWaitingEditState, self).__init__(**kwargs)
        template = 'Workflow {} is not in waiting for curation state.'
        self.message = template.format(workflow_id)
class CallbackRecordNotFoundError(CallbackError):
    """Raised when the requested record id does not exist."""

    code = 404
    error_code = 'RECORD_NOT_FOUND'

    def __init__(self, recid, **kwargs):
        """Build a message naming the missing record."""
        super(CallbackRecordNotFoundError, self).__init__(**kwargs)
        template = 'The record with id "{}" was not found.'
        self.message = template.format(recid)
class InspirehepMissingDataError(Exception):
    """Raised when data expected from inspirehep is missing."""
    pass
class CannotFindProperSubgroup(WorkflowsError):
    """Raised when a collaboration subgroup cannot be normalized."""

    def __init__(self, collaboration_id, subgroup, **kwargs):
        """Record the collaboration and the subgroup that was not found."""
        super(CannotFindProperSubgroup, self).__init__(**kwargs)
        self.collaboration_id = collaboration_id
        self.subgroup_missing = subgroup
        # BUG FIX: the original also passed ``wf_id=self.wf_id`` to
        # ``format``, but no ``wf_id`` attribute is ever assigned, so merely
        # constructing this exception raised AttributeError.  The format
        # string never referenced ``{wf_id}``, so the argument is dropped.
        self.message = (
            "Subgroup {missing_subgroup} was not found in collaboration "
            "{collaboration_id} (normalization problem)."
        ).format(
            missing_subgroup=self.subgroup_missing,
            collaboration_id=self.collaboration_id,
        )
class MissingRecordControlNumber(WorkflowsError):
    """Raised when record data lacks a ``control_number`` field."""

    def __init__(self):
        # NOTE(review): super().__init__ is never called here, so any base
        # WorkflowsError state is left uninitialized — confirm intended.
        self.message = "Cannot find control_number in record data."
|
inspirehep/inspire-next
|
inspirehep/modules/workflows/errors.py
|
Python
|
gpl-3.0
| 5,348
|
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from contextlib import contextmanager
import os
from rpmfluff import SimpleRpmBuild, SourceFile, expectedArch
import shutil
import tempfile
import unittest
from pylorax.dnfbase import get_dnf_base_object
from pylorax.ltmpl import LoraxTemplate, LoraxTemplateRunner
from pylorax.ltmpl import brace_expand, split_and_expand, rglob, rexists
from pylorax.sysutils import joinpaths
class TemplateFunctionsTestCase(unittest.TestCase):
    """Unit tests for the module-level helpers in ``pylorax.ltmpl``."""

    def test_brace_expand(self):
        """Test expanding braces"""
        # Strings without commas inside braces pass through unchanged.
        self.assertEqual(list(brace_expand("foo")), ["foo"])
        self.assertEqual(list(brace_expand("${foo}")), ["${foo}"])
        self.assertEqual(list(brace_expand("${foo,bar}")), ["$foo", "$bar"])
        self.assertEqual(list(brace_expand("foo {one,two,three,four}")), ["foo one", "foo two", "foo three", "foo four"])

    def test_split_and_expand(self):
        """Test splitting lines and expanding braces"""
        self.assertEqual(list(split_and_expand("foo bar")), ["foo", "bar"])
        self.assertEqual(list(split_and_expand("foo bar-{one,two}")), ["foo", "bar-one", "bar-two"])
        # Quoting (single or double) keeps the expanded phrase as one token.
        self.assertEqual(list(split_and_expand("foo 'bar {one,two}'")), ["foo", "bar one", "bar two"])
        self.assertEqual(list(split_and_expand('foo "bar {one,two}"')), ["foo", "bar one", "bar two"])

    def test_rglob(self):
        """Test rglob function"""
        self.assertEqual(list(rglob("*http*toml", "./tests/pylorax/blueprints", fatal=False)), ["example-http-server.toml"])
        self.assertEqual(list(rglob("einstein", "./tests/pylorax/blueprints", fatal=False)), [])
        # With fatal=True a miss must raise instead of yielding nothing.
        with self.assertRaises(IOError):
            list(rglob("einstein", "./tests/pylorax/blueprints", fatal=True))

    def test_rexists(self):
        """Test rexists function"""
        self.assertTrue(rexists("*http*toml", "./tests/pylorax/blueprints"))
        self.assertFalse(rexists("einstein", "./tests/pylorax/blueprints"))
class LoraxTemplateTestCase(unittest.TestCase):
    """Tests for parsing Mako-based Lorax templates with LoraxTemplate."""

    @classmethod
    def setUpClass(self):
        # One shared template parser rooted at the test templates directory.
        self.templates = LoraxTemplate(["./tests/pylorax/templates/"])

    def test_parse_missing_quote(self):
        """Test parsing a template with missing quote"""
        with self.assertRaises(Exception):
            self.templates.parse("parse-missing-quote.tmpl", {"basearch": "x86_64"})

    def test_parse_template_x86_64(self):
        """Test LoraxTemplate.parse() with basearch set to x86_64"""
        commands = self.templates.parse("parse-test.tmpl", {"basearch": "x86_64"})
        # x86_64 keeps the arch-conditional "not-s390x-package" line.
        self.assertEqual(commands, [['installpkg', 'common-package'],
                                    ['installpkg', 'foo-one', 'foo-two'],
                                    ['installpkg', 'not-s390x-package'],
                                    ['run_pkg_transaction']])

    def test_parse_template_s390x(self):
        """Test LoraxTemplate.parse() with basearch set to s390x"""
        commands = self.templates.parse("parse-test.tmpl", {"basearch": "s390x"})
        # s390x must drop the arch-conditional package line.
        self.assertEqual(commands, [['installpkg', 'common-package'],
                                    ['installpkg', 'foo-one', 'foo-two'],
                                    ['run_pkg_transaction']])
@contextmanager
def in_tempdir(prefix='tmp'):
    """Run the body of a ``with`` block from a fresh temporary directory.

    The previous working directory is restored and the temporary
    directory removed when the block exits, even on error.
    """
    previous = os.getcwd()
    scratch = tempfile.mkdtemp(prefix=prefix)
    os.chdir(scratch)
    try:
        yield
    finally:
        os.chdir(previous)
        shutil.rmtree(scratch)
def makeFakeRPM(repo_dir, name, epoch, version, release, files=None):
    """Build a throw-away rpm in *repo_dir* for use in test repositories."""
    pkg = SimpleRpmBuild(name, version, release)
    if epoch:
        pkg.epoch = epoch

    if not files:
        # No explicit file list: give the rpm one random payload file.
        pkg.add_simple_payload_file_random()
    else:
        # Add a fake file entry for each requested install path.
        for path in files:
            pkg.add_installed_file(
                installPath=path,
                sourceFile=SourceFile(os.path.basename(path),
                                      "THIS IS A FAKE FILE"))

    # rpmfluff builds in the cwd, so work inside a scratch directory.
    with in_tempdir("lorax-test-rpms."):
        pkg.make()
        built = pkg.get_built_rpm(expectedArch)
        shutil.move(built, repo_dir)
class LoraxTemplateRunnerTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        """Build three rpm repositories and a template runner shared by all tests."""
        # Create 2 repositories with rpmfluff
        self.repo1_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
        makeFakeRPM(self.repo1_dir, "anaconda-core", 0, "0.0.1", "1")
        makeFakeRPM(self.repo1_dir, "exact", 0, "1.3.17", "1")
        makeFakeRPM(self.repo1_dir, "fake-milhouse", 0, "1.0.0", "1")
        makeFakeRPM(self.repo1_dir, "fake-bart", 2, "1.13.0", "6")
        makeFakeRPM(self.repo1_dir, "fake-homer", 0, "0.4.0", "2")
        makeFakeRPM(self.repo1_dir, "lots-of-files", 0, "0.1.1", "1",
                    ["/lorax-files/file-one.txt",
                     "/lorax-files/file-two.txt",
                     "/lorax-files/file-three.txt"])
        makeFakeRPM(self.repo1_dir, "known-path", 0, "0.1.8", "1", ["/known-path/file-one.txt"])
        os.system("createrepo_c " + self.repo1_dir)

        # Second repo carries a newer fake-milhouse so multi-repo updates can be tested.
        self.repo2_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
        makeFakeRPM(self.repo2_dir, "fake-milhouse", 0, "1.3.0", "1")
        makeFakeRPM(self.repo2_dir, "fake-lisa", 0, "1.2.0", "1")
        os.system("createrepo_c " + self.repo2_dir)

        # Third repo holds a package plus its -debuginfo companion.
        self.repo3_dir = tempfile.mkdtemp(prefix="lorax.test.debug.repo.")
        makeFakeRPM(self.repo3_dir, "fake-marge", 0, "2.3.0", "1", ["/fake-marge/file-one.txt"])
        makeFakeRPM(self.repo3_dir, "fake-marge-debuginfo", 0, "2.3.0", "1", ["/fake-marge/file-one-debuginfo.txt"])
        os.system("createrepo_c " + self.repo3_dir)

        # Get a dbo with just these repos
        # Setup a template runner
        self.root_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
        sources = ["file://"+self.repo1_dir, "file://"+self.repo2_dir, "file://"+self.repo3_dir]
        self.dnfbase = get_dnf_base_object(self.root_dir, sources,
                                           enablerepos=[], disablerepos=[])
        self.runner = LoraxTemplateRunner(inroot=self.root_dir,
                                          outroot=self.root_dir,
                                          dbo=self.dnfbase,
                                          templatedir="./tests/pylorax/templates")
@classmethod
def tearDownClass(self):
shutil.rmtree(self.repo1_dir)
shutil.rmtree(self.repo2_dir)
shutil.rmtree(self.root_dir)
    def test_00_runner_multi_repo(self):
        """Test installing packages with updates in a 2nd repo"""
        # If this does not raise an error it means that:
        #   Installing a named package works (anaconda-core)
        #   Installing a pinned package works (exact-1.3.17)
        #   Installing a globbed set of package names from multiple repos works
        #   removepkg removes a package's files
        #   removefrom removes some, but not all, of a package's files
        #
        # These all need to be done in one template because run_pkg_transaction can only run once
        self.runner.run("install-test.tmpl")
        self.runner.run("install-remove-test.tmpl")
        # removepkg wiped the whole known-path package payload...
        self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/known-path/file-one.txt")))
        # ...while removefrom removed file-two.txt but kept file-one.txt.
        self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/lorax-files/file-one.txt")))
        self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lorax-files/file-two.txt")))

        # Check the debug log
        self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/root/debug-pkgs.log")))
def test_install_file(self):
"""Test append, and install template commands"""
self.runner.run("install-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/etc/lorax-test")))
self.assertEqual(open(joinpaths(self.root_dir, "/etc/lorax-test")).read(), "TESTING LORAX TEMPLATES\n")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/etc/lorax-test-dest")))
def test_installimg(self):
"""Test installimg template command"""
self.runner.run("installimg-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "images/product.img")))
def test_mkdir(self):
"""Test mkdir template command"""
self.runner.run("mkdir-cmd.tmpl")
self.assertTrue(os.path.isdir(joinpaths(self.root_dir, "/etc/lorax-mkdir")))
def test_replace(self):
"""Test append, and replace template command"""
self.runner.run("replace-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/etc/lorax-replace")))
self.assertEqual(open(joinpaths(self.root_dir, "/etc/lorax-replace")).read(), "Running 1.2.3 for lorax\n")
def test_treeinfo(self):
"""Test treeinfo template command"""
self.runner.run("treeinfo-cmd.tmpl")
self.assertEqual(self.runner.results.treeinfo["images"]["boot.iso"], "images/boot.iso")
def test_installkernel(self):
"""Test installkernel template command"""
self.runner.run("installkernel-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/kernels/vmlinuz")))
self.assertEqual(self.runner.results.treeinfo["images"]["kernel"], "/kernels/vmlinuz")
def test_installinitrd(self):
"""Test installinitrd template command"""
self.runner.run("installinitrd-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/kernels/initrd.img")))
self.assertEqual(self.runner.results.treeinfo["images"]["initrd"], "/kernels/initrd.img")
def test_installupgradeinitrd(self):
"""Test installupgraedinitrd template command"""
self.runner.run("installupgradeinitrd-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/kernels/upgrade.img")))
self.assertEqual(self.runner.results.treeinfo["images"]["upgrade"], "/kernels/upgrade.img")
def test_hardlink(self):
"""Test hardlink template command"""
self.runner.run("hardlink-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/linked-file")))
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/lorax-dir/lorax-file")))
def test_symlink(self):
"""Test symlink template command"""
self.runner.run("symlink-cmd.tmpl")
self.assertTrue(os.path.islink(joinpaths(self.root_dir, "/symlinked-file")))
def test_copy(self):
"""Test copy template command"""
self.runner.run("copy-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/copied-file")))
def test_move(self):
"""Test move template command"""
self.runner.run("move-cmd.tmpl")
self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lorax-file")))
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/moved-file")))
def test_remove(self):
"""Test remove template command"""
self.runner.run("remove-cmd.tmpl")
self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lorax-file")))
def test_chmod(self):
"""Test chmod template command"""
self.runner.run("chmod-cmd.tmpl")
self.assertEqual(os.stat(joinpaths(self.root_dir, "/lorax-file")).st_mode, 0o100641)
def test_runcmd(self):
"""Test runcmd template command"""
self.runner.run("runcmd-cmd.tmpl", root=self.root_dir)
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/lorax-runcmd")))
def test_removekmod(self):
"""Test removekmod template command"""
self.runner.run("removekmod-cmd.tmpl")
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/lib/modules/1.2.3/kernel/drivers/video/bar1.ko")))
self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lib/modules/1.2.3/kernel/drivers/video/bar2.ko")))
self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lib/modules/1.2.3/kernel/sound/foo1.ko")))
self.assertFalse(os.path.exists(joinpaths(self.root_dir, "/lib/modules/1.2.3/kernel/sound/foo2.ko")))
def test_createaddrsize(self):
"""Test createaddrsize template command"""
self.runner.run("createaddrsize-cmd.tmpl", root=self.root_dir)
self.assertTrue(os.path.exists(joinpaths(self.root_dir, "/initrd.addrsize")))
def test_systemctl(self):
"""Test systemctl template command"""
self.runner.run("systemctl-cmd.tmpl")
self.assertTrue(os.path.islink(joinpaths(self.root_dir, "/etc/systemd/system/multi-user.target.wants/foo.service")))
def test_bad_template(self):
"""Test parsing a bad template"""
with self.assertRaises(Exception):
self.runner.run("bad-template.tmpl")
def test_unknown_cmd(self):
"""Test a template with an unknown command"""
with self.assertRaises(ValueError):
self.runner.run("unknown-cmd.tmpl")
|
wgwoods/lorax
|
tests/pylorax/test_ltmpl.py
|
Python
|
gpl-2.0
| 13,602
|
import numpy as np
from bokeh.objects import ColumnDataSource, DataRange1d, Plot, Glyph, LinearAxis, Grid
from bokeh.widgetobjects import VBox, Tabs, Panel
from bokeh.glyphs import (AnnularWedge, Annulus, Arc, Bezier, Circle, Line, MultiLine, Oval,
Patch, Patches, Quad, Quadratic, Ray, Rect, Segment, Square, Wedge, CircleX, Triangle,
Cross, Diamond, InvertedTriangle, SquareX, Asterisk, SquareCross, DiamondCross, CircleCross)
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
# Parabola y = x**2 sampled at N points; marker sizes ramp 10..20 px.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
# Small closed outline used as the per-point shape for patches/multi_line.
xpts = np.array([-.09, -.12, .0, .12, .09])
ypts = np.array([-.1, .02, .1, .02, -.1])
source = ColumnDataSource(dict(
    x = x,
    y = y,
    sizes = sizes,
    # one translated copy of the outline per data point
    xs = [ xpts + xx for xx in x ],
    ys = [ ypts + yy for yy in y ],
    # pre-shifted columns used below as endpoints / control points
    xp02 = x + 0.2,
    xp01 = x + 0.1,
    xm01 = x - 0.1,
    yp01 = y + 0.1,
    ym01 = y - 0.1,
))
# Shared data-space ranges so every tab shows the same window.
xdr = DataRange1d(sources=[source.columns("x")])
ydr = DataRange1d(sources=[source.columns("y")])
def screen(value):
    """Wrap *value* as a screen-unit measurement dict for glyph properties."""
    return {"value": value, "units": "screen"}
# (tab title, glyph) pairs -- one demo tab per glyph type, all fed from `source`.
glyphs = [
    ("annular_wedge", AnnularWedge(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), start_angle=0.6, end_angle=4.1, fill_color="#8888ee")),
    ("annulus", Annulus(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), fill_color="#7FC97F")),
    ("arc", Arc(x="x", y="y", radius=screen(20), start_angle=0.6, end_angle=4.1, line_color="#BEAED4", line_width=3)),
    ("bezier", Bezier(x0="x", y0="y", x1="xp02", y1="y", cx0="xp01", cy0="yp01", cx1="xm01", cy1="ym01", line_color="#D95F02", line_width=2)),
    ("line", Line(x="x", y="y", line_color="#F46D43")),
    ("multi_line", MultiLine(xs="xs", ys="ys", line_color="#8073AC", line_width=2)),
    ("oval", Oval(x="x", y="y", width=screen(15), height=screen(25), angle=-0.7, fill_color="#1D91C0")),
    ("patch", Patch(x="x", y="y", fill_color="#A6CEE3")),
    ("patches", Patches(xs="xs", ys="ys", fill_color="#FB9A99")),
    ("quad", Quad(left="x", right="xm01", top="y", bottom="ym01", fill_color="#B3DE69")),
    ("quadratic", Quadratic(x0="x", y0="y", x1="xp02", y1="y", cx="xp01", cy="yp01", line_color="#4DAF4A", line_width=3)),
    ("ray", Ray(x="x", y="y", length=45, angle=-0.7, line_color="#FB8072", line_width=2)),
    ("rect", Rect(x="x", y="y", width=screen(10), height=screen(20), angle=-0.7, fill_color="#CAB2D6")),
    ("segment", Segment(x0="x", y0="y", x1="xm01", y1="ym01", line_color="#F4A582", line_width=3)),
    ("wedge", Wedge(x="x", y="y", radius=screen(15), start_angle=0.6, end_angle=4.1, fill_color="#B3DE69")),
]
# Scatter-marker variants; sizes come from the 'sizes' data column.
markers = [
    ("circle", Circle(x="x", y="y", radius=0.1, radius_units="data", fill_color="#3288BD")),
    ("circle_x", CircleX(x="x", y="y", size="sizes", line_color="#DD1C77", fill_color=None)),
    ("circle_cross", CircleCross(x="x", y="y", size="sizes", line_color="#FB8072", fill_color=None, line_width=2)),
    ("square", Square(x="x", y="y", size="sizes", fill_color="#74ADD1")),
    ("square_x", SquareX(x="x", y="y", size="sizes", line_color="#FDAE6B", fill_color=None, line_width=2)),
    ("square_cross", SquareCross(x="x", y="y", size="sizes", line_color="#7FC97F", fill_color=None, line_width=2)),
    ("diamond", Diamond(x="x", y="y", size="sizes", line_color="#1C9099", line_width=2)),
    ("diamond_cross", DiamondCross(x="x", y="y", size="sizes", line_color="#386CB0", fill_color=None, line_width=2)),
    ("triangle", Triangle(x="x", y="y", size="sizes", line_color="#99D594", line_width=2)),
    ("inverted_triangle", InvertedTriangle(x="x", y="y", size="sizes", line_color="#DE2D26", line_width=2)),
    ("cross", Cross(x="x", y="y", size="sizes", line_color="#E6550D", fill_color=None, line_width=2)),
    ("asterisk", Asterisk(x="x", y="y", size="sizes", line_color="#F0027F", fill_color=None, line_width=2)),
]
def make_tab(title, glyph):
    """Build a Plot showing *glyph* over the shared source/ranges, wrapped in a Panel."""
    plot = Plot(title=title, data_sources=[source], x_range=xdr, y_range=ydr)
    renderer = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=glyph)
    plot.renderers.append(renderer)
    xaxis = LinearAxis(plot=plot, location="bottom")
    plot.below.append(xaxis)
    yaxis = LinearAxis(plot=plot, location="left")
    plot.left.append(yaxis)
    # NOTE(review): xgrid/ygrid are never referenced again; presumably Grid
    # attaches itself to `plot` on construction in this old bokeh API -- confirm.
    xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
    ygrid = Grid(plot=plot, dimension=1, axis=yaxis)
    tab = Panel(child=plot, title=title)
    return tab
def make_tabs(objs):
    """Return a Tabs widget with one Panel per (title, glyph) pair in *objs*."""
    panels = [make_tab(title, obj) for title, obj in objs]
    return Tabs(tabs=panels)
# Two stacked tab groups: glyph demos on top, marker demos below.
layout = VBox(children=[make_tabs(glyphs), make_tabs(markers)])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
    filename = "glyphs.html"
    # Render a standalone HTML file with inline BokehJS and open it.
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Glyphs"))
    print("Wrote %s" % filename)
    view(filename)
|
the13fools/Bokeh_Examples
|
glyphs/glyphs.py
|
Python
|
bsd-3-clause
| 4,858
|
import os
def foo():  # fixture: intentionally invalid, do not "fix"
    print(hello_world)  # undefined name -- exercises the linter's undefined-name check
|
AtomLinter/linter-pyflakes
|
spec/fixtures/invalid.py
|
Python
|
mit
| 46
|
# This file is generated by /home/leo/pycode/nrg_mapping/source/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
def get_info(name):
    """Return the build-time system_info dict for *name*.

    Falls back to the "<name>_info" global, and finally to an empty dict
    when neither entry exists in this module.
    """
    table = globals()
    return table.get(name, table.get(name + "_info", {}))
def show():
    """Print every *_info result dict captured at build time."""
    for key, entry in globals().items():
        # Skip private names and anything that is not a plain dict
        # (functions, modules, the __builtins__ entry, ...).
        if key[0] == "_" or type(entry) is not dict:
            continue
        print(key + ":")
        if not entry:
            print(" NOT AVAILABLE")
        for field, text in entry.items():
            text = str(text)
            # The 'sources' list can be huge; elide the middle.
            if field == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print(" %s = %s" % (field, text))
|
GiggleLiu/nrg_mapping
|
nrgmap/__config__.py
|
Python
|
mit
| 689
|
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Infrastructure Toolkit}.
"""
import sys
import os
import subprocess
import argparse
from entropy.const import const_file_readable
from entropy.i18n import _
from _entropy.eit.commands.descriptor import EitCommandDescriptor
from _entropy.eit.commands.command import EitCommand
class EitLog(EitCommand):
    """
    Main Eit log command.
    """
    NAME = "log"
    ALIASES = []
    ALLOW_UNPRIVILEGED = True
    def _get_parser(self):
        """Build the argparse parser for `eit log` (one optional <repo> arg)."""
        descriptor = EitCommandDescriptor.obtain_descriptor(
            EitLog.NAME)
        parser = argparse.ArgumentParser(
            description=descriptor.get_description(),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            prog="%s %s" % (sys.argv[0], EitLog.NAME))
        parser.add_argument("repo", nargs='?', default=None,
                            metavar="<repo>", help=_("repository"))
        return parser
    INTRODUCTION = """\
Show log for given repository (if any, otherwise the current working one).
This commands opens repository ChangeLog.bz2 using *bzless*.
"""
    def man(self):
        """
        Overridden from EitCommand.
        """
        return self._man()
    def parse(self):
        """Parse CLI args; return (callable, args) per the EitCommand protocol."""
        parser = self._get_parser()
        try:
            nsargs = parser.parse_args(self._args)
        except IOError as err:
            sys.stderr.write("%s\n" % (err,))
            return parser.print_help, []
        # _call_exclusive serializes access to the repository (base class).
        return self._call_exclusive, [self._log, nsargs.repo]
    def _log(self, entropy_server):
        """Open the repository's compressed ChangeLog with bzless.

        Returns the pager's exit status, or 1 when the log file is missing
        or unreadable.
        """
        changelog_path = \
            entropy_server._get_local_repository_compressed_changelog_file(
                entropy_server.repository())
        if not const_file_readable(changelog_path):
            entropy_server.output(
                _("log is not available"),
                importance=1, level="error")
            return 1
        proc = subprocess.Popen(["bzless", changelog_path])
        return proc.wait()
# Register the command with the global descriptor table so `eit log` resolves.
EitCommandDescriptor.register(
    EitCommandDescriptor(
        EitLog,
        EitLog.NAME,
        _('show log for repository'))
)
|
Sabayon/entropy
|
server/eit/commands/log.py
|
Python
|
gpl-2.0
| 2,260
|
"""Contains the Credit (coin play) mode code"""
# credits.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
from math import floor
from mpf.system.data_manager import DataManager
from mpf.system.mode import Mode
class Credits(Mode):
    """Credit (coin play) mode.

    Money is tracked internally as "credit units" -- multiples of the
    smallest accepted coin value -- so fractional credits can be shown
    as fractions on the display. Game start is gated on having at least
    one game's worth of credit units.
    """
    def mode_init(self):
        # 'earnings' audit totals persist across machine restarts.
        self.data_manager = DataManager(self.machine, 'earnings')
        self.earnings = self.data_manager.get_data()
        self.credit_units_per_game = 0
        self.credit_units_inserted = 0
        self.credit_unit = 0
        self.max_credit_units = 0
        self.pricing_tiers = set()
        self.credit_units_for_pricing_tiers = 0
        # Machine-wide 'credits' config, overlaid with this mode's own section.
        self.credits_config = self.machine.config['credits']
        if 'credits' in self.config:
            self.credits_config.update(self.config['credits'])
        self.credits_config = self.machine.config_processor.process_config2(
            'credits', self.credits_config, 'credits')
    def mode_start(self, **kwargs):
        self.add_mode_event_handler('enable_free_play',
                                    self.enable_free_play)
        self.add_mode_event_handler('enable_credit_play',
                                    self.enable_credit_play)
        self.add_mode_event_handler('toggle_credit_play',
                                    self.toggle_credit_play)
        # Slam tilt forfeits all stored credits.
        self.add_mode_event_handler('slam_tilt',
                                    self.clear_all_credits)
        if self.credits_config['free_play']:
            self.enable_free_play(post_event=False)
        else:
            self._calculate_credit_units()
            self._calculate_pricing_tiers()
            self.enable_credit_play(post_event=False)
    def mode_stop(self, **kwargs):
        # Leaving the mode always reverts to free play.
        self.enable_free_play()
    def _calculate_credit_units(self):
        # "credit units" are how we handle fractional credits (since most
        # pinball machines show credits as fractions instead of decimals).
        # We convert everything to the smallest coin unit and then track
        # how many of those a game takes. So price of $0.75 per game with a
        # quarter slot means a credit unit is 0.25 and the game needs 3 credit
        # units to start. This is all hidden from the player
        # We need to calculate it differently depending on how the coin switch
        # values relate to game cost.
        if self.credits_config['switches']:
            min_currency_value = min(x['value'] for x in
                                     self.credits_config['switches'])
        else:
            min_currency_value = (
                self.credits_config['pricing_tiers'][0]['price'])
        price_per_game = self.credits_config['pricing_tiers'][0]['price']
        if min_currency_value == price_per_game:
            self.credit_unit = min_currency_value
        elif min_currency_value < price_per_game:
            self.credit_unit = price_per_game - min_currency_value
            if self.credit_unit > min_currency_value:
                self.credit_unit = min_currency_value
        elif min_currency_value > price_per_game:
            self.credit_unit = min_currency_value - price_per_game
            if self.credit_unit > price_per_game:
                self.credit_unit = price_per_game
        self.log.debug("Calculated the credit unit to be %s based on a minimum"
                       "currency value of %s and a price per game of %s",
                       self.credit_unit, min_currency_value, price_per_game)
        self.credit_units_per_game = (
            int(self.credits_config['pricing_tiers'][0]['price'] /
                self.credit_unit))
        self.log.debug("Credit units per game: %s", self.credit_units_per_game)
        if self.credits_config['max_credits']:
            self.max_credit_units = (self.credit_units_per_game *
                                     self.credits_config['max_credits'])
    def _calculate_pricing_tiers(self):
        # pricing tiers are calculated with a set of tuples which indicate the
        # credit units for the price break as well as the "bump" in credit
        # units that should be added once that break is passed.
        for pricing_tier in self.credits_config['pricing_tiers']:
            credit_units = pricing_tier['price'] / self.credit_unit
            actual_credit_units = self.credit_units_per_game * pricing_tier['credits']
            bonus = actual_credit_units - credit_units
            self.log.debug("Pricing Tier Bonus. Price: %s, Credits: %s. "
                           "Credit units for this tier: %s, Credit units this "
                           "tier buys: %s, Bonus bump needed: %s",
                           pricing_tier['price'], pricing_tier['credits'],
                           credit_units, actual_credit_units, bonus)
            self.pricing_tiers.add((credit_units, bonus))
    def enable_credit_play(self, post_event=True, **kwargs):
        """Switch to coin play: set up machine vars and event/switch handlers."""
        self.credits_config['free_play'] = False
        # Preserve any credit balance that already exists as a machine var.
        if self.machine.is_machine_var('credit_units'):
            credit_units = self.machine.get_machine_var('credit_units')
        else:
            credit_units = 0
        if self.credits_config['persist_credits_while_off_time']:
            self.machine.create_machine_var(name='credit_units',
                                            value=credit_units,
                                            persist=True,
                                            expire_secs=self.credits_config[
                                                'persist_credits_while_off_time'])
        else:
            self.machine.create_machine_var(name='credit_units',
                                            value=credit_units)
        self.machine.create_machine_var('credits_string', ' ')
        self.machine.create_machine_var('credits_value', '0')
        self.machine.create_machine_var('credits_whole_num', 0)
        self.machine.create_machine_var('credits_numerator', 0)
        self.machine.create_machine_var('credits_denominator', 0)
        self._update_credit_strings()
        self._enable_credit_switch_handlers()
        # setup switch handlers
        self.machine.events.add_handler('player_add_request',
                                        self._player_add_request)
        self.machine.events.add_handler('request_to_start_game',
                                        self._request_to_start_game)
        self.machine.events.add_handler('player_add_success',
                                        self._player_add_success)
        # NOTE(review): the next two registrations look swapped:
        # 'mode_game_started' is wired to _game_ended (which *schedules* the
        # credit-expiration delays) and 'mode_game_ended' to _game_started
        # (which cancels them). Confirm against upstream before relying on it.
        self.machine.events.add_handler('mode_game_started',
                                        self._game_ended)
        self.machine.events.add_handler('mode_game_ended',
                                        self._game_started)
        self.machine.events.add_handler('ball_starting',
                                        self._ball_starting)
        if post_event:
            self.machine.events.post('enabling_credit_play')
    def enable_free_play(self, post_event=True, **kwargs):
        """Switch to free play: tear down all credit handlers and refresh display."""
        self.credits_config['free_play'] = True
        self.machine.events.remove_handler(self._player_add_request)
        self.machine.events.remove_handler(self._request_to_start_game)
        self.machine.events.remove_handler(self._player_add_success)
        self.machine.events.remove_handler(self._game_ended)
        self.machine.events.remove_handler(self._game_started)
        self.machine.events.remove_handler(self._ball_starting)
        self._disable_credit_switch_handlers()
        self._update_credit_strings()
        if post_event:
            self.machine.events.post('enabling_free_play')
    def toggle_credit_play(self, **kwargs):
        if self.credits_config['free_play']:
            self.enable_credit_play()
        else:
            self.enable_free_play()
    def _player_add_request(self):
        # Boolean return feeds MPF's boolean-event logic: False blocks the add.
        if (self.machine.get_machine_var('credit_units') >=
                self.credit_units_per_game):
            self.log.debug("Received request to add player. Request Approved")
            return True
        else:
            self.log.debug("Received request to add player. Request Denied")
            self.machine.events.post("not_enough_credits")
            return False
    def _request_to_start_game(self):
        # Same gate as _player_add_request, but for starting the game itself.
        if (self.machine.get_machine_var('credit_units') >=
                self.credit_units_per_game):
            self.log.debug("Received request to start game. Request Approved")
            return True
        else:
            self.log.debug("Received request to start game. Request Denied")
            self.machine.events.post("not_enough_credits")
            return False
    def _player_add_success(self, **kwargs):
        # A player was added: deduct one game's worth of credit units.
        new_credit_units = (self.machine.get_machine_var('credit_units') -
                            self.credit_units_per_game)
        if new_credit_units < 0:
            self.log.warning("Somehow credit units went below 0?!? Resetting "
                             "to 0.")
            new_credit_units = 0
        self.machine.set_machine_var('credit_units', new_credit_units)
        self._update_credit_strings()
    def _enable_credit_switch_handlers(self):
        for switch_settings in self.credits_config['switches']:
            self.machine.switch_controller.add_switch_handler(
                switch_name=switch_settings['switch'].name,
                callback=self._credit_switch_callback,
                callback_kwargs={'value': switch_settings['value'],
                                 'audit_class': switch_settings['type']})
        for switch in self.credits_config['service_credits_switch']:
            self.machine.switch_controller.add_switch_handler(
                switch_name=switch.name,
                callback=self._service_credit_callback)
    def _disable_credit_switch_handlers(self):
        for switch_settings in self.credits_config['switches']:
            self.machine.switch_controller.remove_switch_handler(
                switch_name=switch_settings['switch'].name,
                callback=self._credit_switch_callback)
        for switch in self.credits_config['service_credits_switch']:
            self.machine.switch_controller.remove_switch_handler(
                switch_name=switch.name,
                callback=self._service_credit_callback)
    def _credit_switch_callback(self, value, audit_class):
        # Convert the coin's currency value to credit units, then audit it.
        self._add_credit_units(credit_units=value/self.credit_unit)
        self._audit(value, audit_class)
    def _service_credit_callback(self):
        self.log.debug("Service Credit Added")
        # Service credits never count toward pricing-tier bonuses.
        self.add_credit(price_tiering=False)
        self._audit(1, 'service_credit')
    def _add_credit_units(self, credit_units, price_tiering=True):
        self.log.debug("Adding %s credit_units. Price tiering: %s",
                       credit_units, price_tiering)
        previous_credit_units = self.machine.get_machine_var('credit_units')
        total_credit_units = credit_units + previous_credit_units
        # check for pricing tier
        if price_tiering:
            self.credit_units_for_pricing_tiers += credit_units
            bonus_credit_units = 0
            for tier_credit_units, bonus in self.pricing_tiers:
                if self.credit_units_for_pricing_tiers % tier_credit_units == 0:
                    bonus_credit_units += bonus
            total_credit_units += bonus_credit_units
        max_credit_units = (self.credits_config['max_credits'] *
                            self.credit_units_per_game)
        if max_credit_units and total_credit_units > max_credit_units:
            self.log.debug("Max credits reached")
            self._update_credit_strings()
            self.machine.events.post('max_credits_reached')
            self.machine.set_machine_var('credit_units', max_credit_units)
        # NOTE(review): when 'max_credits' is 0/unset, max_credit_units is 0
        # and the condition below never fires, so inserted credits appear to
        # be dropped; and when the cap above was just applied, this branch can
        # overwrite it with the uncapped total. Looks like it was meant to be
        # an elif/else -- confirm against upstream before changing.
        if max_credit_units > previous_credit_units:
            self.log.debug("Credit units added")
            self.machine.set_machine_var('credit_units', total_credit_units)
            self._update_credit_strings()
            self.machine.events.post('credits_added')
    def add_credit(self, price_tiering=True):
        """Adds a single credit to the machine.
        Args:
            price_tiering: Boolean which controls whether this credit will be
                eligible for the pricing tier bonuses. Default is True.
        """
        self._add_credit_units(self.credit_units_per_game, price_tiering)
    def _reset_pricing_tier_credits(self):
        # Only reset once per game; flag is set back to False in _game_ended.
        if not self.reset_pricing_tier_count_this_game:
            self.log.debug("Resetting pricing tier credit count")
            self.credit_units_for_pricing_tiers = 0
            self.reset_pricing_tier_count_this_game = True
    def _ball_starting(self, **kwargs):
        # Player 1 reaching ball 2 means the game is really being played.
        if self.player.number == 1 and self.player.ball == 2:
            self._reset_pricing_tier_credits()
    def _update_credit_strings(self):
        """Recompute the credits_* machine vars from the credit-unit balance."""
        machine_credit_units = self.machine.get_machine_var('credit_units')
        whole_num = int(floor(machine_credit_units /
                              self.credit_units_per_game))
        numerator = int(machine_credit_units % self.credit_units_per_game)
        denominator = int(self.credit_units_per_game)
        if numerator:
            if whole_num:
                display_fraction = '{} {}/{}'.format(whole_num, numerator,
                                                     denominator)
            else:
                display_fraction = '{}/{}'.format(numerator, denominator)
        else:
            display_fraction = str(whole_num)
        if self.credits_config['free_play']:
            display_string = self.credits_config['free_play_string']
        else:
            display_string = '{} {}'.format(
                self.credits_config['credits_string'], display_fraction)
        self.machine.set_machine_var('credits_string', display_string)
        self.machine.set_machine_var('credits_value', display_fraction)
        self.machine.set_machine_var('credits_whole_num', whole_num)
        self.machine.set_machine_var('credits_numerator', numerator)
        self.machine.set_machine_var('credits_denominator', denominator)
    def _audit(self, value, audit_class):
        # Lazily create the per-class audit bucket, then persist totals.
        if audit_class not in self.earnings:
            self.earnings[audit_class] = dict()
            self.earnings[audit_class]['total_value'] = 0
            self.earnings[audit_class]['count'] = 0
        self.earnings[audit_class]['total_value'] += value
        self.earnings[audit_class]['count'] += 1
        self.data_manager.save_all(data=self.earnings)
    def _game_started(self):
        self.log.debug("Removing credit clearing delays")
        self.delay.remove('clear_fractional_credits')
        self.delay.remove('clear_all_credits')
    def _game_ended(self):
        # Schedule credit expiration while the machine sits idle.
        if self.credits_config['fractional_credit_expiration_time']:
            self.log.debug("Adding delay to clear fractional credits")
            self.delay.add(
                ms=self.credits_config['fractional_credit_expiration_time'],
                callback=self._clear_fractional_credits,
                name='clear_fractional_credits')
        if self.credits_config['credit_expiration_time']:
            self.log.debug("Adding delay to clear credits")
            self.delay.add(
                ms=self.credits_config['credit_expiration_time'],
                callback=self.clear_all_credits,
                name='clear_all_credits')
        self.reset_pricing_tier_count_this_game = False
    def _clear_fractional_credits(self):
        self.log.debug("Clearing fractional credits")
        credit_units = self.machine.get_machine_var('credit_units')
        # Drop the remainder; keep only whole credits.
        credit_units -= credit_units % self.credit_units_per_game
        self.machine.set_machine_var('credit_units', credit_units)
        self._update_credit_strings()
    def clear_all_credits(self):
        # NOTE(review): also registered for 'slam_tilt' in mode_start; MPF
        # event handlers are typically passed kwargs, which this signature
        # does not accept -- confirm it doesn't raise when posted as an event.
        self.log.debug("Clearing all credits")
        self.machine.set_machine_var('credit_units', 0)
        self._update_credit_strings()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
qcapen/mpf
|
mpf/modes/credits/code/credits.py
|
Python
|
mit
| 17,280
|
"""
toasterReflow.py
Alexander Hiam
A web interface for the toasterReflow reflow toaster oven control
library.
Apache 2.0 license.
"""
import inspect, json, threading
from flask import Flask, g, redirect, url_for, request,\
session, render_template, flash
from functools import wraps
from toasterReflow import *
from bbio import *
from bbio.libraries.MAX31855 import MAX31855
# Set variables for the pins connected to the ADC:
#PyBBIO now uses hardware SPI for MAX31855
#data_pin = GPIO1_15 # P8.15
#clk_pin = GPIO1_14 # P8.16
#cs_pin = GPIO0_27 # P8.17
heater2_pin = "GPIO2_25" # P8.18
heater3_pin = "GPIO2_24"
heater_pin = "GPIO2_23"
fan_pin = "GPIO2_22"
# Open hardware SPI bus 1 and attach the MAX31855 thermocouple amp on CS 0.
SPI1.open()
thermo = MAX31855(SPI1,0)
# Startup sanity read (Python 2 print statement).
print thermo.readTempC()
delay(100)
# Oven(heater, ?, fan, heater2, heater3) -- the second argument's meaning is
# not visible from this file; TODO confirm against the toasterReflow library.
reflow_oven = Oven(heater_pin, 0, fan_pin ,heater2_pin, heater3_pin)
rs_buzz = buzzer("P8_12")
#Do a test buzz
rs_buzz.buzz(1000,0.5)
app = Flask(__name__)
# USERNAME/PASSWORD/SECRET_KEY etc. come from the config file.
app.config.from_pyfile('toasterReflow.cfg')
app.config['NAVIGATION'] = [['Oven Control', '/'],
                            ['Profile Editor', '/profile-editor']]
#Initialise OLED
oled_init()
oled_clearDisplay()
oled_setNormalDisplay()
# Global variable to store the currently selected profile; used when
# starting reflow:
current_profile = None
def requireLogin(f):
    """Decorator: bounce anonymous users to the login page."""
    @wraps(f)
    def loginCheck(*args, **kwargs):
        if session.get('logged_in') is not None:
            return f(*args, **kwargs)
        # Not authenticated -- go log in, then come back to this URL.
        return redirect(url_for('login', next=request.url))
    return loginCheck
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; validate credentials against the app config on POST."""
    error = None
    if request.method == 'POST':
        # Checks are deliberately sequential so only one error is reported.
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            return redirect(url_for('ovenControl'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """ Log user out and redirect to the root page. """
    # pop() with a default is safe if the user was never logged in.
    session.pop('logged_in', None)
    flash('Logged out')
    return redirect('/')
@app.route('/')
@requireLogin
def ovenControl():
    """Render the main oven-control page."""
    return render_template('oven.html')
@app.route('/profile-editor')
@requireLogin
def list_profiles():
    """Render the profile-editor page.

    The profile list itself is fetched client-side from the /profiles
    endpoint. (Dead commented-out server-side rendering removed.)
    """
    return render_template('profile-editor.html')
#-- Oven API --#
@app.route('/profiles', methods=['GET'])
@requireLogin
def getProfileNames():
    """ Returns a JSON list of all the profile names. """
    # Profiles are module-level dicts inside the `profiles` module.
    profile_names = [name for name, content
                     in inspect.getmembers(profiles,
                                           lambda member: type(member) == dict)
                     if name in dir(profiles) and name != "__builtins__"]
    return json.dumps(profile_names)
@app.route('/profiles/<profile_name>', methods=['GET'])
@requireLogin
def getProfile(profile_name):
    """Return the reflow map (JSON) for the named profile, or 404 if unknown.

    Side effect: remembers the matched profile in `current_profile`, which a
    later POST to /control/start uses to run the reflow.
    """
    for name, content in inspect.getmembers(profiles,
                                            lambda member: type(member)==dict):
        if (name == profile_name):
            global current_profile
            current_profile = content
            return json.dumps(reflow_oven.generateMap(content))
    # Bug fix: the 404 message previously used the loop variable `name`,
    # which holds the *last* inspected member (and is unbound when there are
    # no profiles at all) -- report the requested name instead.
    return "Error: profile not found: '%s'" % profile_name, 404
@app.route('/phases', methods=['GET'])
@requireLogin
def getPhases():
    """ Returns JSON list of the reflow phases. """
    # PHASES comes in via the toasterReflow star import.
    return json.dumps(PHASES)
@app.route('/current-phase', methods=['GET'])
@requireLogin
def getCurrentPhase():
    """ Returns the current reflow phases. """
    # Side effect: mirrors the phase name onto OLED row 3; the appended
    # spaces overwrite any longer previous text.
    oled_setTextXY(3,0)
    oled_putString(reflow_oven.current_phase+" ")
    return json.dumps(reflow_oven.current_phase)
@app.route('/current-step', methods=['GET'])
@requireLogin
def getCurrentStep():
    """ Returns the current step number in the current reflow phase. """
    # Raw pass-through of the Oven object's current_step counter.
    return json.dumps(reflow_oven.current_step)
@app.route('/time-step', methods=['GET'])
@requireLogin
def getTimeStep():
    """ Returns the reflow oven's time step. """
    # TIME_STEP_MS is presumably a toasterReflow library constant (star import).
    return json.dumps(TIME_STEP_MS)
@app.route('/realtime-data', methods=['GET'])
@requireLogin
def getRealtimeData():
    """ Returns the reflow oven's real-time data series. """
    # Raw pass-through of the Oven object's realtime_data attribute.
    return json.dumps(reflow_oven.realtime_data)
@app.route('/status', methods=['GET'])
@requireLogin
def getStatus():
    """ Returns 'on' if the oven is running, 'off' if not. """
    # A falsy current_phase means no reflow is in progress.
    return json.dumps('on' if reflow_oven.current_phase else 'off')
@app.route('/error', methods=['GET'])
@requireLogin
def getError():
    """ Returns oven error message. """
    # Raw pass-through of the Oven object's error attribute.
    return json.dumps(reflow_oven.error)
@app.route('/control/temperature', methods=['GET'])
@requireLogin
def temperature():
    """ Returns the oven temperature in Celsius, or error message if error
    encountered. """
    temp = reflow_oven.getTemp()
    if (temp == None):
        # Thermocouple fault: surface the MAX31855 error string instead.
        return json.dumps(reflow_oven.thermocouple.error)
    # Side effect: mirror the reading onto OLED row 2.
    oled_setTextXY(2,0)
    oled_putString("Temp:"+str(temp))
    return json.dumps(temp)
@app.route('/control/heater', methods=['GET', 'POST'])
@requireLogin
def heaterControl():
    """GET the heater state, or POST state='on'/'off' to switch it."""
    if (request.method == 'GET'):
        return json.dumps(reflow_oven.heat_state)
    state = request.form['state']
    actions = {'on': reflow_oven.heatOn, 'off': reflow_oven.heatOff}
    if state in actions:
        actions[state]()
        return json.dumps(state)
    return "Error: Unkown heater state '%s'" % state, 404
@app.route('/control/fan', methods=['GET', 'POST'])
@requireLogin
def fanControl():
    """GET the fan state, or POST state='on'/'off' to switch it."""
    if (request.method == 'GET'):
        return json.dumps(reflow_oven.fan_state)
    else:
        state = request.form['state']
        if (state == 'on'):
            reflow_oven.fanOn()
            return json.dumps(state)
        elif (state == 'off'):
            reflow_oven.fanOff()
            return json.dumps(state)
    # Fix copy-paste bug: this endpoint controls the fan, but the 404 message
    # said "heater" (and misspelled "Unknown").
    return "Error: Unknown fan state '%s'" % state, 404
@app.route('/control/start', methods=['POST'])
@requireLogin
def startReflow():
    """Kick off a reflow run of `current_profile` in a background thread."""
    def reflowInThread():
        # NOTE(review): Python 2 except syntax; failures are only printed to
        # the console, so a crashed run looks silent from the web client.
        try: reflow_oven.run(current_profile)
        except Exception, e:
            print e
    threading.Thread(target=reflowInThread).start()
    return ''
@app.route('/control/stop', methods=['POST'])
@requireLogin
def stopReflow():
    """Signal the running reflow loop to abort."""
    reflow_oven.abort = True
    return ''
#--------------------#
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader;
    # confirm this server is never exposed on an untrusted network.
    app.debug=True
    # Blocks until the development server exits.
    app.run(host='0.0.0.0',port=8000)
    # Make sure the oven is off once the server is stopped:
    reflow_oven.abort = True
|
whatnick/toasterReflow
|
toasterReflow.py
|
Python
|
apache-2.0
| 6,699
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-translate"
description = "Google Cloud Translation API client library"
version = "2.0.0"

# Trove classifier describing the release maturity.  Should be one of:
#   'Development Status :: 3 - Alpha'
#   'Development Status :: 4 - Beta'
#   'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"

dependencies = [
    "google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
    "google-cloud-core >= 1.0.3, < 2.0dev",
]
extras = {}

# Setup boilerplate below this line.

# The long description is the README, read from the directory holding
# this setup.py.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
    readme = readme_file.read()

# Ship only packages under the 'google' namespace — excludes tests,
# benchmarks, etc.
packages = [
    pkg for pkg in setuptools.find_packages() if pkg.startswith("google")
]

# Declare whichever namespace packages are actually present.
namespaces = ["google"]
if "google.cloud" in packages:
    namespaces.append("google.cloud")

setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author="Google LLC",
    author_email="googleapis-packages@google.com",
    license="Apache 2.0",
    url="https://github.com/GoogleCloudPlatform/google-cloud-python",
    classifiers=[
        release_status,
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Operating System :: OS Independent",
        "Topic :: Internet",
    ],
    platforms="Posix; MacOS X; Windows",
    packages=packages,
    namespace_packages=namespaces,
    install_requires=dependencies,
    extras_require=extras,
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
    include_package_data=True,
    zip_safe=False,
)
|
tseaver/google-cloud-python
|
translate/setup.py
|
Python
|
apache-2.0
| 2,783
|
#!/usr/bin/env python3
from python_utility.command_process import CommandProcess
def main():
    """Run flake8 over the project and print the command's output."""
    flake8_arguments = [
        'flake8',
        '--exclude', '.git,.idea,.tox',
        '--verbose',
        '--max-complexity', '5',
    ]
    process = CommandProcess(arguments=flake8_arguments)
    process.print_output()


if __name__ == '__main__':
    main()
|
FunTimeCoding/jenkins-job-manager
|
script/python/flake8.py
|
Python
|
mit
| 357
|
#
# This file is part of python-dbusx. Python-dbusx is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012-2013 the python-dbusx authors. See the file "AUTHORS"
# for a complete list.
import dbusx
class Message(dbusx.MessageBase):
    """
    A Message that is sent or received on a D-BUS connection.

    A message consists of header fields and arguments. Both are exposed as
    attributes on instances of this class.
    """

    def __init__(self, type, no_reply=False, no_auto_start=False, serial=None,
                 reply_serial=None, path=None, interface=None, member=None,
                 error_name=None, destination=None, sender=None):
        """Create a new message.

        Only the *type* argument is mandatory, all other arguments are
        optional. The arguments have the same meaning as their respective
        class attributes.

        When using this constructor, it is your responsibility to check that
        the required arguments for the specific message type are set. If you
        fail to do this, sending out this message on a connection will fail.

        For more user-friendly constructors, see :meth:`method_call` and
        :meth:`signal`.
        """
        super(Message, self).__init__(type)
        self.no_reply = no_reply
        self.no_auto_start = no_auto_start
        # Optional header fields are only assigned when provided; the
        # underlying MessageBase attributes keep their defaults otherwise.
        if serial is not None:
            self.serial = serial
        if reply_serial is not None:
            self.reply_serial = reply_serial
        if path is not None:
            self.path = path
        if interface is not None:
            self.interface = interface
        if member is not None:
            self.member = member
        if error_name is not None:
            self.error_name = error_name
        if destination is not None:
            self.destination = destination
        if sender is not None:
            self.sender = sender

    @classmethod
    def method_call(cls, service, path, interface, method, signature=None,
                    args=None):
        """Create a new METHOD_CALL message.

        This creates a method call for the method *method* on interface
        *interface* at the bus name *service* under the path *path*.
        """
        message = cls(dbusx.MESSAGE_TYPE_METHOD_CALL, destination=service,
                      path=path, interface=interface, member=method)
        if signature is not None:
            # BUG FIX: was `message.set_args(signature, messge)` — a typo
            # that raised NameError at runtime and silently ignored the
            # *args* parameter.
            message.set_args(signature, args)
        return message

    @classmethod
    def signal(cls, service, path, interface, signal, signature=None,
               args=None):
        """Create a new SIGNAL message.

        This represents a signal with name *signal* on interface *interface*
        that is emitted by the service at bus name *service* under path
        *path*.
        """
        message = cls(dbusx.MESSAGE_TYPE_SIGNAL, destination=service,
                      path=path, interface=interface, member=signal)
        if signature is not None:
            message.set_args(signature, args)
        return message

    def reply(self, signature=None, args=None):
        """Create a METHOD_RETURN message.

        The message represents a reply to the current message, which must be
        a METHOD_CALL message.

        Raises:
            TypeError: if the current message is not a METHOD_CALL.
        """
        if self.type != dbusx.MESSAGE_TYPE_METHOD_CALL:
            raise TypeError('Cannot create reply to message of type %s'
                            % self.type)
        message = type(self)(dbusx.MESSAGE_TYPE_METHOD_RETURN,
                             reply_serial=self.serial, destination=self.sender)
        if signature is not None:
            message.set_args(signature, args)
        return message

    def error_reply(self, error_name, signature=None, args=None):
        """Create an ERROR message.

        The message represents an error reply to the current message, which
        must be a METHOD_CALL message.

        Raises:
            TypeError: if the current message is not a METHOD_CALL.
        """
        if self.type != dbusx.MESSAGE_TYPE_METHOD_CALL:
            raise TypeError('Cannot create error reply to message of type %s'
                            % self.type)
        message = type(self)(dbusx.MESSAGE_TYPE_ERROR, error_name=error_name,
                             reply_serial=self.serial, destination=self.sender)
        if signature is not None:
            message.set_args(signature, args)
        return message
|
geertj/python-dbusx
|
lib/dbusx/message.py
|
Python
|
mit
| 4,446
|
from time import sleep
from benchmarking.benchmark import Benchmark
from benchmarking.evaluator import SimpleEvaluator
def _benchmark_test():
    # TODO: Remove this function before finishing project. This method is for testing/example purposes only!
    # Demonstrates the Benchmark/SimpleEvaluator API with three toy methods.
    def method1(arg):
        # Correct answer, ~1 s per call (slow).
        sleep(1)
        return arg
    def method2(arg):
        # Correct answer, ~3 s per call (slowest).
        sleep(3)
        return arg
    def method3(arg):
        # Instant, but returns a wrong answer (off by one).
        return arg + 1
    # Test cases are (input, expected-output) pairs. Note: this file is
    # Python 2 (`xrange`, print statements).
    test_cases = set([(i, i) for i in xrange(10)])
    benchmark = Benchmark(test_cases, SimpleEvaluator())
    print "Method1 scored %.2f" % benchmark.evaluate_method(method=method1)
    print "Method2 scored %.2f" % benchmark.evaluate_method(method=method2)
    print "Method3 scored %.2f" % benchmark.evaluate_method(method=method3)
if __name__ == "__main__":
print "Bioinformatics 2017 project"
_benchmark_test()
|
Waszker/Bioinformatics
|
program/main.py
|
Python
|
mit
| 845
|
#!/usr/bin/env python
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import ycm_core
from ycmd.completers.cpp.clang_completer import ClangCompleter
def GetCompleter( user_options ):
  """Return a ClangCompleter when ycm_core was built with Clang support,
  otherwise None (no Objective-C completion available)."""
  if not ycm_core.HasClangSupport():
    return None
  return ClangCompleter( user_options )
|
amchoukir/ycmd
|
ycmd/completers/objc/hook.py
|
Python
|
gpl-3.0
| 957
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from translate.filters import checks
from django.db import IntegrityError
from pytest_pootle.factories import LanguageDBFactory
from pootle.core.delegate import revision
from pootle_app.models import Directory
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
@pytest.mark.django_db
def test_tp_create_fail(po_directory, tutorial, english):
    """TP creation requires both a language and a project, and each
    (project, language) pair must be unique."""
    # Trying to create a TP with no Language raises a RelatedObjectDoesNotExist
    # which can be caught with Language.DoesNotExist
    with pytest.raises(Language.DoesNotExist):
        TranslationProject.objects.create()
    # TP needs a project set too...
    with pytest.raises(Project.DoesNotExist):
        TranslationProject.objects.create(language=english)
    # An English tutorial TP was already set up automatically, so creating
    # it again violates the uniqueness constraint.
    with pytest.raises(IntegrityError):
        TranslationProject.objects.create(project=tutorial, language=english)
@pytest.mark.django_db
def test_tp_create_parent_dirs(tp0):
    """create_parent_dirs creates the intermediate Directory objects for a
    nested store path and returns the immediate parent directory."""
    parent = tp0.create_parent_dirs("%sfoo/bar/baz.po" % tp0.pootle_path)
    assert (
        parent
        == Directory.objects.get(
            pootle_path="%sfoo/bar/" % tp0.pootle_path))
@pytest.mark.django_db
def test_tp_create_templates(project0_nongnu, project0,
                             templates, no_templates_tps, complex_ttk):
    """init_from_templates copies every template store — and its units —
    into a newly created TP."""
    # As there is a tutorial template it will automatically create stores for
    # our new TP
    template_tp = TranslationProject.objects.create(
        language=templates, project=project0)
    template = Store.objects.create(
        name="foo.pot",
        translation_project=template_tp,
        parent=template_tp.directory)
    template.update(complex_ttk)
    tp = TranslationProject.objects.create(
        project=project0, language=LanguageDBFactory())
    tp.init_from_templates()
    assert tp.stores.count() == template_tp.stores.count()
    # Unit sources/targets are copied verbatim from the template store.
    assert (
        [(s, t)
         for s, t
         in template_tp.stores.first().units.values_list("source_f",
                                                         "target_f")]
        == [(s, t)
            for s, t
            in tp.stores.first().units.values_list("source_f",
                                                   "target_f")])
@pytest.mark.django_db
def test_tp_init_from_template_po(project0, templates,
                                  no_templates_tps, complex_ttk):
    """A template named `template.pot` becomes `<langcode>.po` in a
    gnu-style project."""
    # When initing a tp from a file called `template.pot` the resulting
    # store should be called `langcode.po` if the project is gnuish
    project0.config["pootle_fs.translation_mappings"] = dict(
        default="/<dir_path>/<language_code>.<ext>")
    template_tp = TranslationProject.objects.create(
        language=templates, project=project0)
    template = Store.objects.create(
        name="template.pot",
        translation_project=template_tp,
        parent=template_tp.directory)
    template.update(complex_ttk)
    tp = TranslationProject.objects.create(
        project=project0, language=LanguageDBFactory())
    tp.init_from_templates()
    store = tp.stores.get()
    assert store.name == "%s.po" % tp.language.code
@pytest.mark.django_db
def test_tp_create_with_files(project0_directory, project0, store0, settings):
    """Creating a TP succeeds when matching .po files already exist on
    disk under the translation directory."""
    # lets add some files by hand
    trans_dir = settings.POOTLE_TRANSLATION_DIRECTORY
    language = LanguageDBFactory()
    tp_dir = os.path.join(trans_dir, "%s/project0" % language.code)
    os.makedirs(tp_dir)
    with open(os.path.join(tp_dir, "store0.po"), "w") as f:
        f.write(store0.serialize())
    TranslationProject.objects.create(project=project0, language=language)
@pytest.mark.django_db
def test_tp_stats_created_from_template(po_directory, templates, tutorial):
    """A TP initialised from templates starts with the template's word
    count and otherwise all-zero translation stats."""
    language = LanguageDBFactory(code="foolang")
    tp = TranslationProject.objects.create(language=language, project=tutorial)
    tp.init_from_templates()
    assert tp.stores.all().count() == 1
    stats = tp.data_tool.get_stats()
    assert stats['total'] == 2 # there are 2 words in test template
    assert stats['translated'] == 0
    assert stats['fuzzy'] == 0
    assert stats['suggestions'] == 0
    assert stats['critical'] == 0
@pytest.mark.django_db
def test_can_be_inited_from_templates(po_directory, tutorial, templates):
    """With a templates language present, a new TP reports it can be
    initialised from templates."""
    language = LanguageDBFactory()
    tp = TranslationProject(project=tutorial, language=language)
    assert tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_cannot_be_inited_from_templates(project0, no_templates_tps):
    """Without a templates TP, a new TP reports it cannot be initialised
    from templates."""
    language = LanguageDBFactory()
    tp = TranslationProject(project=project0, language=language)
    assert not tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_tp_checker(po_directory, tp_checker_tests):
    """The TP's quality checker matches the project's checkstyle, falling
    back to StandardChecker for unknown styles."""
    language = Language.objects.get(code="language0")
    # checker_name_ is the parametrised style name; only project is needed.
    checker_name_, project = tp_checker_tests
    tp = TranslationProject.objects.create(project=project, language=language)
    checkerclasses = [
        checks.projectcheckers.get(tp.project.checkstyle,
                                   checks.StandardChecker)
    ]
    assert [x.__class__ for x in tp.checker.checkers] == checkerclasses
@pytest.mark.django_db
def test_tp_cache_on_delete(tp0):
    """Deleting a TP bumps the parent project's "stats" cache revision."""
    proj_revision = revision.get(
        tp0.project.directory.__class__)(
        tp0.project.directory)
    orig_revision = proj_revision.get("stats")
    tp0.delete()
    # The revision must change so cached stats are invalidated.
    assert (
        proj_revision.get("stats")
        != orig_revision)
|
unho/pootle
|
tests/models/translationproject.py
|
Python
|
gpl-3.0
| 5,854
|
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope
from tensormate.graph import TfGgraphBuilder
class ImageGraphBuilder(TfGgraphBuilder):
    """Graph builder specialised for image models.

    Inside the build scope, every layer function listed in
    ``data_format_ops`` receives a common ``data_format`` argument via
    ``arg_scope`` (defaults to "NHWC").
    """
    def __init__(self, scope=None, device=None, plain=False, data_format="NHWC",
                 data_format_ops=(layers.conv2d,
                                  layers.convolution2d,
                                  layers.convolution2d_transpose,
                                  layers.convolution2d_in_plane,
                                  layers.convolution2d_transpose,
                                  layers.conv2d_in_plane,
                                  layers.conv2d_transpose,
                                  layers.separable_conv2d,
                                  layers.separable_convolution2d,
                                  layers.avg_pool2d,
                                  layers.max_pool2d,
                                  layers.batch_norm)):
        # scope/device/plain are handled by the base class.
        # NOTE(review): convolution2d_transpose appears twice in the default
        # tuple — harmless for arg_scope, but presumably unintended.
        super(ImageGraphBuilder, self).__init__(scope=scope, device=device, plain=plain)
        self.data_format = data_format
        self.data_format_ops = data_format_ops if data_format_ops is not None else []
    def _call_body(self, *args, **kwargs):
        """Build the graph under the variable scope (reusing variables via
        tf.AUTO_REUSE), with data_format applied to the listed ops and the
        build optionally pinned to self._device."""
        # is_training = kwargs.get("is_training", True)
        # reuse = self.ref_count > 0
        with tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE):
            with arg_scope(self.data_format_ops, data_format=self.data_format):
                if self._device is None:
                    output = self._build(*args, **kwargs)
                else:
                    with tf.device(self._device):
                        output = self._build(*args, **kwargs)
        return output
|
songgc/tensormate
|
tensormate/graph/image_graph.py
|
Python
|
apache-2.0
| 1,742
|
#-*- coding: utf-8 -*-
# Creation Date : 2016-10-18
# Created by : Antoine LeBel
import archive
import intranet_copy
import local_copy
import web_copy
print("Le programme démarre")
archive = archive.Archive()
try:
print("Tentative de copy")
archive.perform_location_copy()
except AttributeError:
print("Le programme n'a pas encore définit son comportement de copie")
print("Le programme définit sa location")
if archive.get_location() == "web" :
archive.set_location_copy_process(web_copy.WebCopy())
elif archive.get_location() == "intranet":
archive.set_location_copy_process(intranet_copy.IntranetCopy())
else:
archive.set_location_copy_process(local_copy.LocalCopy())
print("Le programme a définit son comportement selon son endroit")
archive.perform_location_copy()
|
Pobux/pattern_conception
|
strategy/main.py
|
Python
|
mit
| 803
|
import asyncio
from gear import Database
from gear.cloud_config import get_gcp_config
from hailtop import aiotools
from hailtop.aiocloud import aiogoogle
from hailtop.utils import RateLimit, periodically_call
from ....driver.driver import CloudDriver, process_outstanding_events
from ....driver.instance_collection import InstanceCollectionManager, JobPrivateInstanceManager, Pool
from ....inst_coll_config import InstanceCollectionConfigs
from .activity_logs import process_activity_log_events_since
from .billing_manager import GCPBillingManager
from .disks import delete_orphaned_disks
from .resource_manager import GCPResourceManager
from .zones import ZoneMonitor
class GCPDriver(CloudDriver):
    """CloudDriver implementation for Google Cloud Platform.

    Owns the GCP compute/logging API clients, the zone monitor, the
    instance collections (pools and job-private instances) and the billing
    manager, and registers the periodic background tasks that keep them
    up to date.
    """
    @staticmethod
    async def create(
        app,
        db: Database,  # BORROWED
        machine_name_prefix: str,
        namespace: str,
        inst_coll_configs: InstanceCollectionConfigs,
        credentials_file: str,
        task_manager: aiotools.BackgroundTaskManager,  # BORROWED
    ) -> 'GCPDriver':
        """Asynchronously construct a fully wired GCPDriver.

        Builds the API clients, zone monitor, billing manager and all
        instance collections (pools concurrently with the job-private
        instance manager), then schedules periodic maintenance tasks on
        *task_manager*.
        """
        gcp_config = get_gcp_config()
        project = gcp_config.project
        zone = gcp_config.zone
        regions = gcp_config.regions
        compute_client = aiogoogle.GoogleComputeClient(project, credentials_file=credentials_file)
        activity_logs_client = aiogoogle.GoogleLoggingClient(
            credentials_file=credentials_file,
            # The project-wide logging quota is 60 request/m. The event
            # loop sleeps 15s per iteration, so the max rate is 4
            # iterations/m. Note, the event loop could make multiple
            # logging requests per iteration, so these numbers are not
            # quite comparable. I didn't want to consume the entire quota
            # since there will be other users of the logging API (us at
            # the web console, test deployments, etc.)
            rate_limit=RateLimit(10, 60),
        )
        zone_monitor = await ZoneMonitor.create(compute_client, regions, zone)
        billing_manager = await GCPBillingManager.create(db)
        inst_coll_manager = InstanceCollectionManager(db, machine_name_prefix, zone_monitor)
        resource_manager = GCPResourceManager(project, compute_client, billing_manager)
        # One Pool per configured pool; created concurrently below.
        create_pools_coros = [
            Pool.create(
                app,
                db,
                inst_coll_manager,
                resource_manager,
                machine_name_prefix,
                config,
                app['async_worker_pool'],
                task_manager,
            )
            for pool_name, config in inst_coll_configs.name_pool_config.items()
        ]
        jpim, *_ = await asyncio.gather(
            JobPrivateInstanceManager.create(
                app,
                db,
                inst_coll_manager,
                resource_manager,
                machine_name_prefix,
                inst_coll_configs.jpim_config,
                task_manager,
            ),
            *create_pools_coros
        )
        driver = GCPDriver(
            db,
            machine_name_prefix,
            compute_client,
            activity_logs_client,
            project,
            namespace,
            zone_monitor,
            inst_coll_manager,
            jpim,
            billing_manager,
        )
        # Periodic maintenance; intervals are in seconds.
        task_manager.ensure_future(periodically_call(15, driver.process_activity_logs))
        task_manager.ensure_future(periodically_call(60, zone_monitor.update_region_quotas))
        task_manager.ensure_future(periodically_call(60, driver.delete_orphaned_disks))
        task_manager.ensure_future(periodically_call(300, billing_manager.refresh_resources))
        return driver
    def __init__(
        self,
        db: Database,
        machine_name_prefix: str,
        compute_client: aiogoogle.GoogleComputeClient,
        activity_logs_client: aiogoogle.GoogleLoggingClient,
        project: str,
        namespace: str,
        zone_monitor: ZoneMonitor,
        inst_coll_manager: InstanceCollectionManager,
        job_private_inst_manager: JobPrivateInstanceManager,
        billing_manager: GCPBillingManager,
    ):
        # Prefer the async `create` factory above; this only stores the
        # already-constructed collaborators.
        self.db = db
        self.machine_name_prefix = machine_name_prefix
        self.compute_client = compute_client
        self.activity_logs_client = activity_logs_client
        self.project = project
        self.namespace = namespace
        self.zone_monitor = zone_monitor
        self.inst_coll_manager = inst_coll_manager
        self.job_private_inst_manager = job_private_inst_manager
        self.billing_manager = billing_manager
    async def shutdown(self) -> None:
        """Close both API clients; try/finally guarantees the logging
        client is closed even if closing the compute client raises."""
        try:
            await self.compute_client.close()
        finally:
            await self.activity_logs_client.close()
    async def process_activity_logs(self) -> None:
        """Process GCP activity-log events newer than the stored mark."""
        async def _process_activity_log_events_since(mark):
            return await process_activity_log_events_since(
                self.db,
                self.inst_coll_manager,
                self.activity_logs_client,
                self.zone_monitor.zone_success_rate,
                self.machine_name_prefix,
                self.project,
                mark,
            )
        await process_outstanding_events(self.db, _process_activity_log_events_since)
    async def delete_orphaned_disks(self) -> None:
        """Delete disks in the monitored zones that no longer belong to
        any known instance."""
        await delete_orphaned_disks(
            self.compute_client, self.zone_monitor.zones, self.inst_coll_manager, self.namespace
        )
|
hail-is/hail
|
batch/batch/cloud/gcp/driver/driver.py
|
Python
|
mit
| 5,479
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Clearbit Slack Notifier documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 2 00:49:26 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import clearbit_slack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Clearbit Slack Notifier'
copyright = '2015, Paul Logston'
author = 'Paul Logston'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = clearbit_slack.__version__
# The full version, including alpha/beta/rc tags.
release = clearbit_slack.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ClearbitSlackNotifierdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ClearbitSlackNotifier.tex', 'Clearbit Slack Notifier Documentation',
'Paul Logston', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'clearbitslacknotifier', 'Clearbit Slack Notifier Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ClearbitSlackNotifier', 'Clearbit Slack Notifier Documentation',
author, 'ClearbitSlackNotifier', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
15five/clearbit-slack-python
|
docs/conf.py
|
Python
|
mit
| 9,661
|
"""Main PAC class."""
import numpy as np
import logging
from tensorpac.spectral import spectral, hilbertm
from tensorpac.methods import (get_pac_fcn, pacstr, compute_surrogates,
erpac, ergcpac, _ergcpac_perm, preferred_phase,
normalize)
from tensorpac.gcmi import copnorm
from tensorpac.visu import _PacVisual, _PacPlt, _PolarPlt
from tensorpac.io import set_log_level
from tensorpac.config import CONFIG
logger = logging.getLogger('tensorpac')
class _PacObj(object):
"""Main class for relative PAC objects."""
    def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
                 cycle=(3, 6), width=7):
        # NOTE(review): list defaults are mutable and shared across calls;
        # safe only if pac_vec does not mutate its inputs — confirm.
        # Frequency checking :
        from tensorpac.utils import pac_vec
        self._f_pha, self._f_amp = pac_vec(f_pha, f_amp)
        # Center frequency of each phase/amplitude band — presumably used
        # as axis vectors for plotting; confirm against _PacVisual.
        self._xvec, self._yvec = self.f_pha.mean(1), self.f_amp.mean(1)
        # Check spectral properties :
        self._speccheck(dcomplex, cycle, width)
    def __str__(self):
        """String representation."""
        # self.method is an attribute/property defined elsewhere in the
        # class hierarchy (the PAC method description).
        return self.method
def filter(self, sf, x, ftype='phase', keepfilt=False, edges=None,
n_jobs=-1):
"""Filt the data in the specified frequency bands.
Parameters
----------
sf : float
The sampling frequency.
x : array_like
Array of data of shape (n_epochs, n_times)
ftype : {'phase', 'amplitude'}
Specify if you want to extract phase ('phase') or the amplitude
('amplitude').
n_jobs : int | -1
Number of jobs to compute PAC in parallel. For very large data,
set this parameter to 1 in order to prevent large memory usage.
keepfilt : bool | False
Specify if you only want the filtered data (True). This parameter
is only available with dcomplex='hilbert' and not wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
Returns
-------
xfilt : array_like
The filtered data of shape (n_freqs, n_epochs, n_times)
"""
# ---------------------------------------------------------------------
# check inputs
assert isinstance(sf, (int, float)), ("The sampling frequency must be "
"a float number.")
# Compatibility between keepfilt and wavelet :
if (keepfilt is True) and (self._dcomplex is 'wavelet'):
raise ValueError("Using wavelet for the complex decomposition do "
"not allow to get filtered data only. Set the "
"keepfilt parameter to False or set dcomplex to "
"'hilbert'.")
assert ftype in ['phase', 'amplitude'], ("ftype must either be 'phase'"
" or 'amplitude.'")
mne_epochs_type = CONFIG['MNE_EPOCHS_TYPE']
if not isinstance(x, np.ndarray) and type(x) in mne_epochs_type:
x = x.get_data()
sf = x.info['sfreq']
if x.ndim == 1:
x = x[np.newaxis, :]
assert x.ndim == 2, ("x should be a 2d array like (n_epochs, n_times)")
# check edges
if not isinstance(edges, int):
edges = slice(None)
else:
logger.debug(f" Edges {edges} time samples ignored")
edges = slice(edges, -edges)
self._edges = edges
# ---------------------------------------------------------------------
# Switch between phase or amplitude :
if ftype is 'phase':
tosend = 'pha' if not keepfilt else None
xfilt = spectral(x, sf, self.f_pha, tosend, self._dcomplex,
self._cycle[0], self._width, n_jobs)
elif ftype is 'amplitude':
tosend = 'amp' if not keepfilt else None
xfilt = spectral(x, sf, self.f_amp, tosend, self._dcomplex,
self._cycle[1], self._width, n_jobs)
return xfilt[..., edges]
def _speccheck(self, dcomplex=None, cycle=None, width=None):
"""Check spectral parameters."""
# Check cycle :
if cycle is not None:
cycle = np.asarray(cycle)
if (len(cycle) is not 2) or not cycle.dtype == int:
raise ValueError("Cycle must be a tuple of two integers.")
else:
self._cycle = cycle
# Check complex decomposition :
if dcomplex is not None:
if dcomplex not in ['hilbert', 'wavelet']:
raise ValueError("dcomplex must either be 'hilbert' or "
"'wavelet'.")
else:
self._dcomplex = dcomplex
# Convert Morlet's width :
if width is not None:
self._width = int(width)
@staticmethod
def _phampcheck(pha, amp):
"""Check phase and amplitude values."""
assert pha.ndim == 3, ("`pha` should have a shape of (n_pha, n_epochs,"
" n_times)")
assert amp.ndim == 3, ("`amp` should have a shape of (n_pha, n_epochs,"
" n_times)")
assert pha.shape[1:] == amp.shape[1:], ("`pha` and `amp` must have the"
" same number of trials, "
"channels and time points")
if not np.ptp(pha) <= 2 * np.pi:
logger.error("Your phase is probably in degrees and should be "
"converted in radians using either np.degrees or "
"np.deg2rad.")
# force the phase to be in [-pi, pi]
pha = (pha + np.pi) % (2. * np.pi) - np.pi
return pha, amp
def _infer_pvalues(self, effect, perm, p=.05, mcp='maxstat'):
"""Global function for statistical inferences.
In order to work this method requires :
* effect = array like of shape (n_dims...)
* perm = array like of shape (n_perm, n_dims...)
"""
assert all([isinstance(k, np.ndarray) for k in (effect, perm)])
n_perm = perm.shape[0]
# compute the minimum number of required permutations
n_perm_req = int(10. / p)
if n_perm < n_perm_req:
logger.warning(f"For inferences at p<{p}, it is recommended to per"
f"form at least n_perm={n_perm_req} permutations")
# ---------------------------------------------------------------------
logger.info(f" infer p-values at (p={p}, mcp={mcp})")
# computes the pvalues
if mcp is 'maxstat':
max_p = perm.reshape(n_perm, -1).max(1)[np.newaxis, ...]
nb_over = (effect[..., np.newaxis] <= max_p).sum(-1)
pvalues = nb_over / n_perm
# non-signi. p-values are set to 1. and min(pvalues) = 1 / n_perm
pvalues[pvalues >= p] = 1.
pvalues = np.maximum(1. / n_perm, pvalues)
elif mcp in ['fdr', 'bonferroni']:
from mne.stats import fdr_correction, bonferroni_correction
fcn = fdr_correction if mcp is 'fdr' else bonferroni_correction
# compute the p-values
pvalues = (effect[np.newaxis, ...] <= perm).sum(0) / n_perm
pvalues = np.maximum(1. / n_perm, pvalues)
# apply correction
is_signi, pvalues = fcn(pvalues, alpha=p)
pvalues[~is_signi] = 1.
return pvalues
    @property
    def f_pha(self):
        """Vector of phases of shape (n_pha, 2)."""
        return self._f_pha
    @property
    def f_amp(self):
        """Vector of amplitudes of shape (n_amp, 2)."""
        return self._f_amp
    @property
    def xvec(self):
        """Vector of phases of shape (n_pha,) use for plotting."""
        return self._xvec
    @property
    def yvec(self):
        """Vector of amplitudes of shape (n_amp,) use for plotting."""
        return self._yvec
    # ----------- DCOMPLEX -----------
    @property
    def dcomplex(self):
        """Get the dcomplex value."""
        return self._dcomplex
    @dcomplex.setter
    def dcomplex(self, value):
        """Set dcomplex value."""
        # route through _speccheck so invalid values raise ValueError
        self._speccheck(dcomplex=value)
    # ----------- CYCLE -----------
    @property
    def cycle(self):
        """Get the cycle value."""
        return self._cycle
    @cycle.setter
    def cycle(self, value):
        """Set cycle value."""
        # route through _speccheck so invalid values raise ValueError
        self._speccheck(cycle=value)
    # ----------- WIDTH -----------
    @property
    def width(self):
        """Get the width value."""
        return self._width
    @width.setter
    def width(self, value):
        """Set width value."""
        # NOTE(review): unlike dcomplex/cycle this bypasses _speccheck and
        # skips the int() conversion done at init time — confirm intended
        self._width = value
class Pac(_PacObj, _PacPlt):
"""Compute Phase-Amplitude Coupling (PAC).
Computing PAC is assessed in three steps : compute the real PAC, compute
surrogates and finally, because PAC is very sensible to the noise, correct
the real PAC by the surrogates. This implementation is modular i.e. it lets
you choose among a large range of possible combinations.
Parameters
----------
idpac : tuple/list | (1, 1, 3)
Choose the combination of methods to use in order to extract PAC.
This tuple must be composed of three integers where each one them
refer
* First digit : refer to the pac method
- 1 : Mean Vector Length (MVL) :cite:`canolty2006high`
(see :func:`tensorpac.methods.mean_vector_length`)
- 2 : Modulation Index (MI) :cite:`tort2010measuring`
(see :func:`tensorpac.methods.modulation_index`)
- 3 : Heights Ratio (HR) :cite:`lakatos2005oscillatory`
(see :func:`tensorpac.methods.heights_ratio`)
- 4 : ndPAC :cite:`ozkurt2012statistically`
(see :func:`tensorpac.methods.norm_direct_pac`)
- 5 : Phase-Locking Value (PLV)
:cite:`penny2008testing,lachaux1999measuring`
(see :func:`tensorpac.methods.phase_locking_value`)
- 6 : Gaussian Copula PAC (GCPAC) :cite:`ince2017statistical`
(see :func:`tensorpac.methods.gauss_cop_pac`)
* Second digit : refer to the method for computing surrogates
- 0 : No surrogates
- 1 : Swap phase / amplitude across trials
:cite:`tort2010measuring`
(see :func:`tensorpac.methods.swap_pha_amp`)
- 2 : Swap amplitude time blocks
:cite:`bahramisharif2013propagating`
(see :func:`tensorpac.methods.swap_blocks`)
- 3 : Time lag :cite:`canolty2006high`
(see :func:`tensorpac.methods.time_lag`)
* Third digit : refer to the normalization method for correction
- 0 : No normalization
- 1 : Substract the mean of surrogates
- 2 : Divide by the mean of surrogates
- 3 : Substract then divide by the mean of surrogates
- 4 : Z-score
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
* Using a string. `f_pha` and `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors. In that
case, it uses the definition proposed by Bahramisharif et al.
2013 :cite:`bahramisharif2013propagating` i.e
f_pha = [f - f / 4, f + f / 4] and f_amp = [f - f / 8, f + f / 8]
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude :cite:`bahramisharif2013propagating`.
width : int | 7
Width of the Morlet's wavelet.
n_bins : int | 18
Number of bins for the KLD and HR PAC method
:cite:`tort2010measuring,lakatos2005oscillatory`
"""
    def __init__(self, idpac=(1, 2, 3), f_pha=[2, 4], f_amp=[60, 200],
                 dcomplex='hilbert', cycle=(3, 6), width=7, n_bins=18,
                 verbose=None):
        """Check and initialize."""
        set_log_level(verbose)
        # validate idpac and build the method / surrogate / norm strings
        self._idcheck(idpac)
        _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
                         cycle=cycle, width=width)
        _PacPlt.__init__(self)
        # number of amplitude bins (only used by the KLD / HR methods)
        self.n_bins = int(n_bins)
        logger.info("Phase Amplitude Coupling object defined")
    def fit(self, pha, amp, n_perm=200, p=.05, mcp='maxstat', n_jobs=-1,
            random_state=None, verbose=None):
        """Compute PAC on filtered data.
        Parameters
        ----------
        pha : array_like
            Array of phases of shape (n_pha, n_epochs, n_times).
            Angles should be in rad.
        amp : array_like
            Array of amplitudes of shape (n_amp, n_epochs, n_times).
        n_perm : int | 200
            Number of surrogates to compute.
        p : float | 0.05
            Statistical threshold
        mcp : {'maxstat', 'fdr', 'bonferroni'}
            Correct the p-values for multiple comparisons. Use either :
                * 'maxstat' : maximum statistics
                * 'fdr' : FDR correction (need MNE-Python)
                * 'bonferroni' : Bonferroni correction (need MNE-Python)
        n_jobs : int | -1
            Number of jobs to compute PAC in parallel. For very large data,
            set this parameter to 1 in order to prevent large memory usage.
        random_state : int | None
            Fix the random state of the machine for reproducible results.
        Returns
        -------
        pac : array_like
            Phase-Amplitude Coupling measure of shape (n_amp, n_pha, n_epochs)
        Attributes
        ----------
        pac : array_like
            Unormalized Phase-Amplitude Coupling measure of shape (n_amp,
            n_pha, n_epochs)
        pvalues : array_like
            Array of p-values of shape (n_amp, n_pha)
        surrogates : array_like
            Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)
        """
        set_log_level(verbose)
        # ---------------------------------------------------------------------
        # input checking (also wraps phases into [-pi, pi])
        pha, amp = self._phampcheck(pha, amp)
        self._pvalues, self._surrogates = None, None
        # for the plv, extract the phase of the amplitude
        if self._idpac[0] == 5:
            amp = np.angle(hilbertm(amp))
        # ---------------------------------------------------------------------
        # check if permutations should be computed
        if self._idpac[1] == 0:
            n_perm = None
        if not isinstance(n_perm, int) or not (n_perm > 0):
            # no valid permutation count : disable surrogates AND
            # normalization so `surro` is never referenced below
            self._idpac = (self._idpac[0], 0, 0)
            compute_surro = False
        else:
            compute_surro = True
        # ---------------------------------------------------------------------
        # copnorm if gaussian copula is used
        if self._idpac[0] == 6:
            logger.debug(f" copnorm the phase and the amplitude")
            pha = copnorm(np.stack([np.sin(pha), np.cos(pha)], axis=-2))
            amp = copnorm(amp[..., np.newaxis, :])
        # ---------------------------------------------------------------------
        # true pac estimation
        logger.info(f' true PAC estimation using {self.method}')
        fcn = get_pac_fcn(self.idpac[0], self.n_bins, p)
        pac = fcn(pha, amp)
        # keep an untouched copy of the raw (un-normalized) estimation
        self._pac = pac.copy()
        # ---------------------------------------------------------------------
        # compute surrogates (if needed)
        if compute_surro:
            if random_state is None:
                random_state = int(np.random.randint(0, 10000, size=1))
            logger.info(f" compute surrogates ({self.str_surro}, {n_perm} "
                        f"permutations, random_state={random_state})")
            surro = compute_surrogates(pha, amp, self.idpac[1], fcn, n_perm,
                                       n_jobs, random_state)
            self._surrogates = surro
            # infer pvalues
            self.infer_pvalues(p, mcp=mcp)
        # ---------------------------------------------------------------------
        # normalize (if needed)
        if self._idpac[2] != 0:
            # Get the mean / deviation of surrogates
            logger.info(" normalize true PAC estimation by surrogates "
                        f"({self.str_norm})")
            # NOTE(review): `normalize` is assumed to modify `pac` in place
            # (its return value is discarded) — confirm in tensorpac.methods
            normalize(self.idpac[2], pac, surro)
        return pac
def filterfit(self, sf, x_pha, x_amp=None, n_perm=200, p=.05,
mcp='maxstat', edges=None, n_jobs=-1, random_state=None,
verbose=None):
"""Filt the data then compute PAC on it.
Parameters
----------
sf : float
The sampling frequency.
x_pha, x_amp : array_like
Array of data for computing PAC. x_pha is the data used for
extracting phases and x_amp, amplitudes. Both arrays must have
the same shapes. If you want to compute PAC locally i.e. on the
same electrode, x=x_pha=x_amp. For distant coupling, x_pha and
x_amp could be different but still must to have the same shape.
n_perm : int | 200
Number of surrogates to compute.
p : float | 0.05
Statistical threshold
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. Use either :
* 'maxstat' : maximum statistics
* 'fdr' : FDR correction (need MNE-Python)
* 'bonferroni' : Bonferroni correction (need MNE-Python)
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
n_jobs : int | -1
Number of jobs to compute PAC in parallel. For very large data,
set this parameter to 1 in order to prevent large memory usage.
random_state : int | None
Fix the random state of the machine for reproducible results.
Returns
-------
pac : array_like
Phase-Amplitude Coupling measure of shape (namp, npha, ...).
Attributes
----------
pac : array_like
Unormalized Phase-Amplitude Coupling measure of shape (n_amp,
n_pha, n_epochs)
pvalues : array_like
Array of p-values of shape (n_amp, n_pha)
surrogates : array_like
Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)
"""
# Check if amp is None :
if x_amp is None:
x_amp = x_pha
# Shape checking :
assert x_pha.shape == x_amp.shape, ("Inputs `x_pha` and `x_amp` must "
"have the same shape.")
# Extract phase (npha, ...) and amplitude (namp, ...) :
logger.info(f" extract phases (n_pha={len(self.xvec)}) and "
f"amplitudes (n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, edges=edges, n_jobs=1)
pha = self.filter(sf, x_pha, 'phase', **kw)
amp = self.filter(sf, x_amp, 'amplitude', **kw)
# Special cases :
if self._idpac[0] == 5:
amp = np.angle(hilbertm(amp))
# Compute pac :
return self.fit(pha, amp, p=p, mcp=mcp, n_perm=n_perm, n_jobs=n_jobs,
random_state=random_state, verbose=verbose)
    def infer_pvalues(self, p=0.05, mcp='maxstat'):
        """Infer p-values based on surrogate distribution.
        Parameters
        ----------
        p : float | 0.05
            Significance threshold
        mcp : {'maxstat', 'fdr', 'bonferroni'}
            Correct the p-values for multiple comparisons. Use either :
                * 'maxstat' : maximum statistics
                * 'fdr' : FDR correction (need MNE-Python)
                * 'bonferroni' : Bonferroni correction (need MNE-Python)
        Returns
        -------
        pvalues : array_like
            Array of p-values of shape (n_amp, n_pha)
        """
        # ---------------------------------------------------------------------
        # check that pac and surrogates has already been computed
        # (hasattr is False when the underlying properties raise because
        # `_pac` / `_surrogates` were never set by `fit`)
        assert hasattr(self, 'pac'), ("You should compute PAC first. Use the "
                                      "`fit` method")
        # NOTE(review): `fit` sets `_surrogates = None` before computing, so
        # hasattr is True even when surrogates are disabled — in that case the
        # `.mean(3)` below fails on None ; confirm intended
        assert hasattr(self, 'surrogates'), "No surrogates computed"
        # mean pac and surrogates across trials
        m_pac, m_surro = self.pac.mean(2), self.surrogates.mean(3)
        self._pvalues = self._infer_pvalues(m_pac, m_surro, p=p, mcp=mcp)
        return self._pvalues
def _idcheck(self, idpac):
"""Check the idpac parameter."""
idpac = np.atleast_1d(idpac)
if not all([isinstance(k, int) for k in idpac]) and (len(idpac) != 3):
raise ValueError("idpac must be a tuple/list of 3 integers.")
# Ozkurt PAC case (doesn't need surrogates and normalization)
if idpac[0] == 4:
idpac = np.array([4, 0, 0])
if (idpac[0] == 1) and (idpac[1] == 0) and (idpac[2] == 0):
logger.warning(
"MVL is amplitude dependent which means that if the amplitude "
"increases, MVL also increases. You should select a "
"normalization method for correcting this limitation "
"(e.g idpac=(1, 2, 4))")
if (idpac[2] != 0) and (idpac[1] == 0):
logger.warning("If you want to normalize the estimated PAC, you "
"should select a surrogate method (second digit of "
"`idpac`). Normalization ignored.")
idpac[2] = 0
self._idpac = idpac
# string representation
self.method, self.str_surro, self.str_norm = pacstr(idpac)
    @property
    def idpac(self):
        """Get the idpac value."""
        return self._idpac
    @idpac.setter
    def idpac(self, value):
        """Set idpac value."""
        # re-validate and rebuild the method / surrogate / norm strings
        self._idcheck(value)
    @property
    def pac(self):
        """Array of un-normalized PAC of shape (n_amp, n_pha, n_epochs)."""
        return self._pac
    @property
    def surrogates(self):
        """Array of surrogates of shape (n_perm, n_amp, n_pha, n_epochs)."""
        return self._surrogates
    @property
    def pvalues(self):
        """Array of p-values of shape (n_amp, n_pha)."""
        return self._pvalues
class EventRelatedPac(_PacObj, _PacVisual):
"""Compute the Event Related Phase-Amplitude Coupling (ERPAC).
The traditional PAC approach is computed across time, hence this means that
you can't observe PAC changes across time. In contrast, the ERPAC is
computed across epochs (or trials) which preserves the time dimension.
Parameters
----------
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude.
width : int | 7
Width of the Morlet's wavelet.
"""
    def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
                 cycle=(3, 6), width=7, verbose=None):
        """Check and initialize."""
        set_log_level(verbose)
        # frequency vectors and spectral parameters live in the base class
        _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
                         cycle=cycle, width=width)
        _PacPlt.__init__(self)
        logger.info("Event Related PAC object defined")
    def fit(self, pha, amp, method='circular', smooth=None, n_jobs=-1,
            n_perm=None, p=.05, mcp='fdr', verbose=None):
        """Compute the Event-Related Phase-Amplitude Coupling (ERPAC).
        The ERPAC :cite:`voytek2013method` is used to measure PAC across trials
        and is interesting for real-time estimation.
        Parameters
        ----------
        pha, amp : array_like
            Respectively the phase of slower oscillations of shape
            (n_pha, n_epochs, n_times) and the amplitude of faster
            oscillations of shape (n_pha, n_epochs, n_times).
        method : {'circular', 'gc'}
            Name of the method for computing erpac. Use 'circular' for
            reproducing :cite:`voytek2013method` or 'gc' for a Gaussian-Copula
            based erpac :cite:`ince2017statistical`.
        smooth : int | None
            Half number of time-points to use to produce a smoothing. Only
            active with the Gaussian-Copula ('gc') method.
        n_perm : int | None
            Number of permutations to compute for assessing p-values for the
            gaussian-copula ('gc') method. Statistics are performed by randomly
            swapping phase trials
        p : float | 0.05
            Statistical threshold for the gaussian-copula ('gc') method
        mcp : {'fdr', 'bonferroni'}
            Correct the p-values for multiple comparisons. This is needed when
            using the circular ERPAC (:cite:`voytek2013method`). Note that the
            correction is performed using MNE-Python.
        Returns
        -------
        erpac : array_like
            The ERPAC estimation of shape (n_amp, n_pha, n_times)
        """
        set_log_level(verbose)
        # validate shapes and wrap phases into [-pi, pi]
        pha, amp = self._phampcheck(pha, amp)
        self.method = method
        self._pvalues = None
        # move the trial axis to the end (n_freqs, n_times, n_epochs)
        pha, amp = np.moveaxis(pha, 1, -1), np.moveaxis(amp, 1, -1)
        # method switch
        if method == 'circular':
            self.method = "ERPAC (Voytek et al. 2013)"
            logger.info(f" Compute {self.method}")
            # `erpac` returns raw p-values; correct them for multiple
            # comparisons right away
            self._erpac, self._pvalues = erpac(pha, amp)
            self.infer_pvalues(p=p, mcp=mcp)
        elif method == 'gc':
            self.method = "Gaussian-Copula ERPAC"
            logger.info(f" Compute {self.method}")
            # copnorm phases and amplitudes then compute erpac
            sco = copnorm(np.stack([np.sin(pha), np.cos(pha)], axis=-2))
            amp = copnorm(amp)[..., np.newaxis, :]
            self._erpac = ergcpac(sco, amp, smooth=smooth, n_jobs=n_jobs)
            # compute permutations (if needed)
            if isinstance(n_perm, int) and (n_perm > 0):
                logger.info(f" Compute {n_perm} permutations")
                self._surrogates = _ergcpac_perm(sco, amp, smooth=smooth,
                                                 n_jobs=n_jobs, n_perm=n_perm)
                self.infer_pvalues(p=p, mcp=mcp)
        return self.erpac
def filterfit(self, sf, x_pha, x_amp=None, method='circular', smooth=None,
n_perm=None, p=.05, mcp='fdr', edges=None, n_jobs=-1,
verbose=None):
"""Extract phases, amplitudes and compute ERPAC.
Parameters
----------
sf : float
The sampling frequency.
x_pha, x_amp : array_like
Array of data for computing ERPAC. x_pha is the data used for
extracting phases and x_amp, amplitudes. Both arrays must have
the same shapes (i.e n_epochs, n_times). If you want to compute
local ERPAC i.e. on the same electrode, x=x_pha=x_amp. For distant
coupling, x_pha and x_amp could be different but still must to have
the same shape.
method : {'circular', 'gc'}
Name of the method for computing erpac. Use 'circular' for
reproducing :cite:`voytek2013method` or 'gc' for a Gaussian-Copula
based erpac.
smooth : int | None
Half number of time-points to use to produce a smoothing. Only
active with the Gaussian-Copula ('gc') method
:cite:`ince2017statistical`.
n_perm : int | None
Number of permutations to compute for assessing p-values for the
gaussian-copula ('gc') method. Statistics are performed by randomly
swapping phase trials
p : float | 0.05
Statistical threshold for the gaussian-copula ('gc') method
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. This is needed when
using the circular ERPAC (:cite:`voytek2013method`). Note that the
correction is performed using MNE-Python.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
Returns
-------
erpac : array_like
The ERPAC estimation of shape (n_amp, n_pha, n_times)
"""
x_amp = x_pha if not isinstance(x_amp, np.ndarray) else x_amp
# extract phases and amplitudes
logger.info(f" Extract phases (n_pha={len(self.xvec)}) and "
f"amplitudes (n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, edges=edges, n_jobs=1)
pha = self.filter(sf, x_pha, ftype='phase', **kw)
amp = self.filter(sf, x_amp, ftype='amplitude', **kw)
# compute erpac
return self.fit(pha, amp, method=method, smooth=smooth, n_jobs=n_jobs,
n_perm=n_perm, p=p, mcp=mcp, verbose=verbose)
def infer_pvalues(self, p=0.05, mcp='fdr'):
"""Infer p-values based on surrogate distribution.
Parameters
----------
p : float | 0.05
Statistical threshold
mcp : {'fdr', 'bonferroni'}
Correct the p-values for multiple comparisons. This is needed when
using the circular ERPAC (:cite:`voytek2013method`). Note that the
correction is performed using MNE-Python.
Returns
-------
pvalues : array_like
Array of p-values of shape (n_amp, n_pha, n_times)
"""
# ---------------------------------------------------------------------
# check that pac and surrogates has already been computed
assert hasattr(self, 'erpac'), ("You should compute ERPAC first. Use "
"the `fit` method")
assert mcp in ['fdr', 'bonferroni']
# correct the p-values for multiple comparisons (Voytek's only)
if "Voytek" in self.method:
logger.info(f" Correct p-values for multiple-comparisons using "
f"{mcp} correction of MNE-Python")
from mne.stats import fdr_correction, bonferroni_correction
fcn = fdr_correction if mcp is 'fdr' else bonferroni_correction
_, self._pvalues = fcn(self._pvalues, alpha=p)
else:
assert hasattr(self, 'surrogates'), "No surrogates computed"
# compute the p-values using maxstat (gcPAC)
self._pvalues = self._infer_pvalues(self.erpac, self.surrogates,
p=p)
return self._pvalues
    @property
    def erpac(self):
        """Array of event-related PAC of shape (n_amp, n_pha, n_times)."""
        return self._erpac
    @property
    def surrogates(self):
        """Array of surrogates of shape (n_perm, n_amp, n_pha, n_times)."""
        return self._surrogates
    @property
    def pvalues(self):
        """Array of p-values of shape (n_amp, n_pha, n_times)."""
        return self._pvalues
class PreferredPhase(_PacObj, _PolarPlt):
"""Compute the Preferred Phase (PP).
The preferred phase is defined as the phase at which the amplitude is
maximum.
Parameters
----------
f_pha, f_amp : list/tuple/array | def: [2, 4] and [60, 200]
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude.
width : int | 7
Width of the Morlet's wavelet.
"""
    def __init__(self, f_pha=[2, 4], f_amp=[60, 200], dcomplex='hilbert',
                 cycle=(3, 6), width=7, verbose=None):
        """Check and initialize."""
        set_log_level(verbose)
        # frequency vectors and spectral parameters live in the base class
        _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
                         cycle=cycle, width=width)
        _PacPlt.__init__(self)
        logger.info("Preferred phase object defined")
        self.method = 'Preferred-Phase (PP)'
def fit(self, pha, amp, n_bins=72):
"""Compute the preferred-phase.
Parameters
----------
pha, amp : array_like
Respectively the phase of slower oscillations of shape
(n_pha, n_epochs, n_times) and the amplitude of faster
oscillations of shape (n_pha, n_epochs, n_times).
n_bins : int | 72
Number of bins for bining the amplitude according to phase
slices.
Returns
-------
binned_amp : array_like
The binned amplitude according to the phase of shape
(n_bins, n_amp, n_pha, n_epochs)
pp : array_like
The prefered phase where the amplitude is maximum of shape
(namp, npha, n_epochs)
polarvec : array_like
The phase vector for the polar plot of shape (n_bins,)
"""
# Check phase and amplitude shapes :
pha, amp = self._phampcheck(pha, amp)
return preferred_phase(pha, amp, n_bins=n_bins)
    def filterfit(self, sf, x_pha, x_amp=None, edges=None, n_bins=12,
                  verbose=None):
        """Extract phases, amplitudes and compute the preferred phase (PP).
        Parameters
        ----------
        sf : float
            The sampling frequency.
        x_pha, x_amp : array_like
            Array of data for computing PP. x_pha is the data used for
            extracting phases and x_amp, amplitudes. Both arrays must have
            the same shapes (i.e n_epochs, n_times). If you want to compute
            local PP i.e. on the same electrode, x=x_pha=x_amp. For distant
            coupling, x_pha and x_amp could be different but still must to have
            the same shape.
        n_bins : int | 12
            Number of bins for bining the amplitude according to phase
            slices.
            NOTE(review): the signature default (12) differs from
            :meth:`fit`'s default (72) — confirm which one is intended.
        edges : int | None
            Number of samples to discard to avoid edge effects due to filtering
        Returns
        -------
        binned_amp : array_like
            The binned amplitude according to the phase of shape
            (n_bins, n_amp, n_pha, n_epochs)
        pp : array_like
            The prefered phase where the amplitude is maximum of shape
            (namp, npha, n_epochs)
        polarvec : array_like
            The phase vector for the polar plot of shape (n_bins,)
        """
        # fall back to the phase data when no amplitude data is provided
        x_amp = x_pha if not isinstance(x_amp, np.ndarray) else x_amp
        # extract phases and amplitudes
        logger.info(f" Extract phases (n_pha={len(self.xvec)}) and "
                    f"amplitudes (n_amps={len(self.yvec)})")
        kw = dict(keepfilt=False, edges=edges, n_jobs=1)
        pha = self.filter(sf, x_pha, ftype='phase', **kw)
        amp = self.filter(sf, x_amp, ftype='amplitude', **kw)
        # compute pp
        return self.fit(pha, amp, n_bins=n_bins)
|
EtienneCmb/tensorpac
|
tensorpac/pac.py
|
Python
|
bsd-3-clause
| 36,901
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Classes for the support of Gettext .po and .pot files.
This implementation assumes that cpo is working. This should not be used
directly, but can be used once cpo has been established to work."""
#TODO:
# - handle headerless PO files better
# - previous msgid and msgctxt
# - accept only unicodes everywhere
import re
import copy
import cStringIO
from translate.lang import data
from translate.misc.multistring import multistring
from translate.storage import pocommon, base, cpo, poparser
from translate.storage.pocommon import encodingToUse
lsep = " "
"""Seperator for #: entries"""
basic_header = r'''msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
'''
class pounit(pocommon.pounit):
# othercomments = [] # # this is another comment
# automaticcomments = [] # #. comment extracted from the source code
# sourcecomments = [] # #: sourcefile.xxx:35
# prev_msgctxt = [] # #| The previous values that msgctxt and msgid held
# prev_msgid = [] #
# prev_msgid_plural = [] #
# typecomments = [] # #, fuzzy
# msgidcomment = u"" # _: within msgid
# msgctxt
# msgid = []
# msgstr = []
# Our homegrown way to indicate what must be copied in a shallow
# fashion
__shallow__ = ['_store']
    def __init__(self, source=None, encoding="UTF-8"):
        """Build a blank unit: empty comments, msgctxt and target.

        :param source: optional initial msgid, handed to the base class
        :param encoding: character encoding used when serialising the unit
        """
        pocommon.pounit.__init__(self, source)
        self._encoding = encodingToUse(encoding)
        self._initallcomments(blankall=True)
        self._msgctxt = u""
        self.target = u""
def _initallcomments(self, blankall=False):
"""Initialises allcomments"""
if blankall:
self.othercomments = []
self.automaticcomments = []
self.sourcecomments = []
self.typecomments = []
self.msgidcomment = u""
    def getsource(self):
        """Return the msgid as stored (unicode or multistring)."""
        return self._source
    def setsource(self, source):
        """Set the msgid, normalising the input to unicode / multistring."""
        self._rich_source = None
        # assert isinstance(source, unicode)
        source = data.forceunicode(source or u"")
        source = source or u""
        if isinstance(source, multistring):
            self._source = source
        elif isinstance(source, unicode):
            self._source = source
        else:
            # NOTE(review): reached only if forceunicode passes list/dict
            # inputs through unchanged — confirm against translate.lang.data
            self._source = multistring(source)
    source = property(getsource, setsource)
    def gettarget(self):
        """Returns the unescaped msgstr"""
        return self._target
    def settarget(self, target):
        """Sets the msgstr to the given (unescaped) value"""
        self._rich_target = None
        # assert isinstance(target, unicode)
        # target = data.forceunicode(target)
        if self.hasplural():
            # plural units always store a multistring
            if isinstance(target, multistring):
                self._target = target
            else:
                #unicode, list, dict
                self._target = multistring(target)
        elif isinstance(target, (dict, list)):
            # a singular unit may only receive a single-element container
            if len(target) == 1:
                self._target = target[0]
            else:
                raise ValueError("po msgid element has no plural but msgstr has %d elements (%s)" % (len(target), target))
        else:
            self._target = target
    target = property(gettarget, settarget)
    def getnotes(self, origin=None):
        """Return comments based on origin value (programmer, developer, source code and translator)"""
        if origin == None:
            # NOTE(review): the two groups are concatenated without a
            # separating newline between them — verify this is intended.
            comments = u"\n".join(self.othercomments)
            comments += u"\n".join(self.automaticcomments)
        elif origin == "translator":
            comments = u"\n".join(self.othercomments)
        elif origin in ["programmer", "developer", "source code"]:
            comments = u"\n".join(self.automaticcomments)
        else:
            raise ValueError("Comment type not valid")
        return comments
    def addnote(self, text, origin=None, position="append"):
        """This is modeled on the XLIFF method. See xliff.py::xliffunit.addnote"""
        # ignore empty strings and strings without non-space characters
        if not (text and text.strip()):
            return
        text = data.forceunicode(text)
        commentlist = self.othercomments
        autocomments = False
        if origin in ["programmer", "developer", "source code"]:
            autocomments = True
            commentlist = self.automaticcomments
        # Drop a single trailing newline so split() does not create an
        # empty last element.
        if text.endswith(u'\n'):
            text = text[:-1]
        newcomments = text.split(u"\n")
        if position == "append":
            newcomments = commentlist + newcomments
        elif position == "prepend":
            newcomments = newcomments + commentlist
        # Rebind (rather than mutate) the chosen comment list.
        if autocomments:
            self.automaticcomments = newcomments
        else:
            self.othercomments = newcomments
    def removenotes(self):
        """Remove all the translator's notes (other comments)"""
        self.othercomments = []
    def __deepcopy__(self, memo={}):
        """Deep-copy the unit, but copy attributes named in __shallow__
        by reference (e.g. the backing store)."""
        # Make an instance to serve as the copy
        new_unit = self.__class__()
        # We'll be testing membership frequently, so make a set from
        # self.__shallow__
        shallow = set(self.__shallow__)
        # Make deep copies of all members which are not in shallow
        # (Python 2 dict API: iteritems)
        for key, value in self.__dict__.iteritems():
            if key not in shallow:
                setattr(new_unit, key, copy.deepcopy(value))
        # Make shallow copies of all members which are in shallow
        for key in set(shallow):
            setattr(new_unit, key, getattr(self, key))
        # Mark memo with ourself, so that we won't get deep copied
        # again
        memo[id(self)] = self
        # Return our copied unit
        return new_unit
    def copy(self):
        """Return a copy of this unit (delegates to __deepcopy__)."""
        return copy.deepcopy(self)
def _msgidlen(self):
if self.hasplural():
len("".join([string for string in self.source.strings]))
else:
return len(self.source)
def _msgstrlen(self):
if self.hasplural():
len("".join([string for string in self.target.strings]))
else:
return len(self.target)
    def merge(self, otherpo, overwrite=False, comments=True, authoritative=False):
        """Merges the otherpo (with the same msgid) into this one.
        Overwrite non-blank self.msgstr only if overwrite is True
        merge comments only if comments is True
        """
        def mergelists(list1, list2, split=False):
            # Merge list2's items into list1 in place, avoiding duplicates.
            #decode where necessary
            if unicode in [type(item) for item in list2] + [type(item) for item in list1]:
                for position, item in enumerate(list1):
                    if isinstance(item, str):
                        list1[position] = item.decode("utf-8")
                for position, item in enumerate(list2):
                    if isinstance(item, str):
                        list2[position] = item.decode("utf-8")
            #Determine the newline style of list2
            lineend = ""
            if list2 and list2[0]:
                for candidate in ["\n", "\r", "\n\r"]:
                    if list2[0].endswith(candidate):
                        lineend = candidate
            if not lineend:
                lineend = ""
            #Split if directed to do so:
            if split:
                # Token-wise merge: append only whitespace-separated tokens
                # of list2 that do not already occur in list1.
                splitlist1 = []
                splitlist2 = []
                for item in list1:
                    splitlist1.extend(item.split())
                for item in list2:
                    splitlist2.extend(item.split())
                list1.extend([item for item in splitlist2 if not item in splitlist1])
            else:
                #Normal merge, but conform to list1 newline style
                if list1 != list2:
                    for item in list2:
                        item = item.rstrip(lineend)
                        # avoid duplicate comment lines (this might cause some problems)
                        if item not in list1 or len(item) < 5:
                            list1.append(item)
        if not isinstance(otherpo, pounit):
            # Foreign unit type: fall back to the generic merge.
            super(pounit, self).merge(otherpo, overwrite, comments)
            return
        if comments:
            mergelists(self.othercomments, otherpo.othercomments)
            mergelists(self.typecomments, otherpo.typecomments)
            if not authoritative:
                # We don't bring across otherpo.automaticcomments as we consider ourself
                # to be the the authority.  Same applies to otherpo.msgidcomments
                mergelists(self.automaticcomments, otherpo.automaticcomments)
                # mergelists(self.msgidcomments, otherpo.msgidcomments) #XXX?
                mergelists(self.sourcecomments, otherpo.sourcecomments, split=True)
        if not self.istranslated() or overwrite:
            # Remove kde-style comments from the translation (if any). XXX - remove
            if pocommon.extract_msgid_comment(otherpo.target):
                otherpo.target = otherpo.target.replace('_: ' + otherpo._extract_msgidcomments() + '\n', '')
            self.target = otherpo.target
            # Fuzzy if the source/context differ; otherwise inherit the
            # other unit's fuzzy state.
            if self.source != otherpo.source or self.getcontext() != otherpo.getcontext():
                self.markfuzzy()
            else:
                self.markfuzzy(otherpo.isfuzzy())
        elif not otherpo.istranslated():
            if self.source != otherpo.source:
                self.markfuzzy()
        else:
            if self.target != otherpo.target:
                self.markfuzzy()
    def isheader(self):
        """Return True if this is the PO header unit (empty id, non-empty target)."""
        #TODO: fix up nicely
        return not self.getid() and len(self.target) > 0
    def isblank(self):
        """Return True if the unit has no msgid, msgstr or context."""
        if self.isheader() or self.msgidcomment:
            return False
        if (self._msgidlen() == 0) and (self._msgstrlen() == 0) and len(self._msgctxt) == 0:
            return True
        return False
    def hastypecomment(self, typecomment):
        """Check whether the given type comment is present"""
        # check for word boundaries properly by using a regular expression...
        return sum(map(lambda tcline: len(re.findall("\\b%s\\b" % typecomment, tcline)), self.typecomments)) != 0
    def hasmarkedcomment(self, commentmarker):
        """Check whether the given comment marker is present as # (commentmarker) ..."""
        # raise DeprecationWarning
        commentmarker = "(%s)" % commentmarker
        for comment in self.othercomments:
            if comment.startswith(commentmarker):
                return True
        return False
    def settypecomment(self, typecomment, present=True):
        """Alters whether a given typecomment is present"""
        if self.hastypecomment(typecomment) != present:
            if present:
                self.typecomments.append("#, %s\n" % typecomment)
            else:
                # this should handle word boundaries properly ...
                # NOTE(review): relies on Python 2 map()/filter() returning
                # lists; under Python 3 self.typecomments would become an
                # iterator here.
                typecomments = map(lambda tcline: re.sub("\\b%s\\b[ \t,]*" % typecomment, "", tcline), self.typecomments)
                self.typecomments = filter(lambda tcline: tcline.strip() != "#,", typecomments)
    def istranslated(self):
        """A unit only counts as translated if it is not obsolete."""
        return super(pounit, self).istranslated() and not self.isobsolete()
    def istranslatable(self):
        """Headers, blank units and obsolete units are not translatable."""
        return not (self.isheader() or self.isblank() or self.isobsolete())
    def isfuzzy(self):
        """Return True if the unit carries the '#, fuzzy' flag."""
        return self.hastypecomment("fuzzy")
    def _domarkfuzzy(self, present=True):
        # Fuzziness is represented as the 'fuzzy' type comment.
        self.settypecomment("fuzzy", present)
    def makeobsolete(self):
        """Makes this unit obsolete"""
        # Obsolete units keep no source/automatic comments.
        self.sourcecomments = []
        self.automaticcomments = []
        super(pounit, self).makeobsolete()
    def hasplural(self):
        """returns whether this pounit contains plural strings..."""
        source = self.source
        return isinstance(source, multistring) and len(source.strings) > 1
    def parse(self, src):
        """Deprecated: parsing happens at file level, not unit level."""
        raise DeprecationWarning("Should not be parsing with a unit")
        return poparser.parse_unit(poparser.ParseState(cStringIO.StringIO(src), pounit), self)
    def __str__(self):
        """convert to a string. double check that unicode is handled somehow here"""
        # Serialisation is delegated to the C-accelerated cpo implementation.
        _cpo_unit = cpo.pounit.buildfromunit(self)
        return str(_cpo_unit)
    def getlocations(self):
        """Get a list of locations from sourcecomments in the PO unit.
        rtype: List
        return: A list of the locations with '#: ' stripped
        """
        #TODO: rename to .locations
        return self.sourcecomments
    def addlocation(self, location):
        """Add a location to sourcecomments in the PO unit.
        :param location: Text location e.g. 'file.c:23' does not include #:
        :type location: String
        """
        self.sourcecomments.append(location)
    def _extract_msgidcomments(self, text=None):
        """Extract KDE style msgid comments from the unit.
        :rtype: String
        :return: Returns the extracted msgidcomments found in this unit's msgid.
        """
        if text:
            return pocommon.extract_msgid_comment(text)
        else:
            return self.msgidcomment
    def getcontext(self):
        """Get the message context."""
        return self._msgctxt + self.msgidcomment
    def setcontext(self, context):
        """Set the msgctxt; None is normalised to the empty string."""
        context = data.forceunicode(context or u"")
        self._msgctxt = context
    def getid(self):
        """Returns a unique identifier for this unit."""
        context = self.getcontext()
        # Gettext does not consider the plural to determine duplicates, only
        # the msgid. For generation of .mo files, we might want to use this
        # code to generate the entry for the hash table, but for now, it is
        # commented out for conformance to gettext.
        # id = '\0'.join(self.source.strings)
        id = self.source
        if self.msgidcomment:
            # KDE-style context embedded in the msgid.
            id = u"_: %s\n%s" % (context, id)
        elif context:
            # msgctxt joined to msgid with EOT (\x04), as gettext does.
            id = u"%s\04%s" % (context, id)
        return id
    def buildfromunit(cls, unit):
        """Build a native unit from a foreign unit, preserving as much
        information as possible."""
        if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy):
            # Already our type: a copy is the cheapest faithful conversion.
            return unit.copy()
        elif isinstance(unit, pocommon.pounit):
            newunit = cls(unit.source)
            newunit.target = unit.target
            #context
            newunit.msgidcomment = unit._extract_msgidcomments()
            if not newunit.msgidcomment:
                newunit.setcontext(unit.getcontext())
            locations = unit.getlocations()
            if locations:
                newunit.addlocations(locations)
            notes = unit.getnotes("developer")
            if notes:
                newunit.addnote(notes, "developer")
            notes = unit.getnotes("translator")
            if notes:
                newunit.addnote(notes, "translator")
            newunit.markfuzzy(unit.isfuzzy())
            if unit.isobsolete():
                newunit.makeobsolete()
            # Carry over at most one format type comment.
            for tc in ['python-format', 'c-format', 'php-format']:
                if unit.hastypecomment(tc):
                    newunit.settypecomment(tc)
                    break
            return newunit
        else:
            # Unknown unit type: let the base class do a best-effort build.
            return base.TranslationUnit.buildfromunit(unit)
    buildfromunit = classmethod(buildfromunit)
class pofile(pocommon.pofile):
    """A .po file containing various units"""
    UnitClass = pounit
    def changeencoding(self, newencoding):
        """Deprecated: changes the encoding on the file."""
        # This should not be here but in poheader. It also shouldn't mangle the
        # header itself, but use poheader methods. All users are removed, so
        # we can deprecate after one release.
        raise DeprecationWarning
        # Everything below is intentionally unreachable, kept for reference.
        self._encoding = encodingToUse(newencoding)
        if not self.units:
            return
        header = self.header()
        if not header or header.isblank():
            return
        charsetline = None
        headerstr = header.target
        for line in headerstr.split("\n"):
            if not ":" in line:
                continue
            key, value = line.strip().split(":", 1)
            if key.strip() != "Content-Type":
                continue
            charsetline = line
        if charsetline is None:
            headerstr += "Content-Type: text/plain; charset=%s" % self._encoding
        else:
            charset = re.search("charset=([^ ]*)", charsetline)
            if charset is None:
                newcharsetline = charsetline
                if not newcharsetline.strip().endswith(";"):
                    newcharsetline += ";"
                newcharsetline += " charset=%s" % self._encoding
            else:
                charset = charset.group(1)
                newcharsetline = charsetline.replace("charset=%s" % charset, "charset=%s" % self._encoding, 1)
            headerstr = headerstr.replace(charsetline, newcharsetline, 1)
        header.target = headerstr
    def _build_self_from_cpo(self):
        """Builds up this store from the internal cpo store.
        A user must ensure that self._cpo_store already exists, and that it is
        deleted afterwards."""
        for unit in self._cpo_store.units:
            self.addunit(self.UnitClass.buildfromunit(unit))
        self._encoding = self._cpo_store._encoding
    def _build_cpo_from_self(self):
        """Builds the internal cpo store from the data in self.
        A user must ensure that self._cpo_store does not exist, and should
        delete it after using it."""
        self._cpo_store = cpo.pofile(noheader=True)
        for unit in self.units:
            if not unit.isblank():
                self._cpo_store.addunit(cpo.pofile.UnitClass.buildfromunit(unit, self._encoding))
        if not self._cpo_store.header():
            #only add a temporary header
            self._cpo_store.makeheader(charset=self._encoding, encoding="8bit")
    def parse(self, input):
        """Parses the given file or file source string."""
        try:
            if hasattr(input, 'name'):
                self.filename = input.name
            elif not getattr(self, 'filename', ''):
                self.filename = ''
            tmp_header_added = False
            # if isinstance(input, str) and '"Content-Type: text/plain; charset=' not in input[:200]:
            #     input = basic_header + input
            #     tmp_header_added = True
            self.units = []
            # Delegate actual parsing to the cpo backend, then convert.
            self._cpo_store = cpo.pofile(input, noheader=True)
            self._build_self_from_cpo()
            del self._cpo_store
            if tmp_header_added:
                self.units = self.units[1:]
        except Exception, e:
            raise base.ParseError(e)
    def removeduplicates(self, duplicatestyle="merge"):
        """Make sure each msgid is unique ; merge comments etc from duplicates into original"""
        # TODO: can we handle consecutive calls to removeduplicates()? What
        # about files already containing msgctxt? - test
        id_dict = {}
        uniqueunits = []
        # TODO: this is using a list as the pos aren't hashable, but this is slow.
        # probably not used frequently enough to worry about it, though.
        markedpos = []
        def addcomment(thepo):
            # Disambiguate by embedding the locations as a KDE-style comment.
            thepo.msgidcomment = " ".join(thepo.getlocations())
            markedpos.append(thepo)
        for thepo in self.units:
            id = thepo.getid()
            if thepo.isheader() and not thepo.getlocations():
                # header msgids shouldn't be merged...
                uniqueunits.append(thepo)
            elif id in id_dict:
                if duplicatestyle == "merge":
                    if id:
                        id_dict[id].merge(thepo)
                    else:
                        addcomment(thepo)
                        uniqueunits.append(thepo)
                elif duplicatestyle == "msgctxt":
                    origpo = id_dict[id]
                    if origpo not in markedpos and id:
                        # if it doesn't have an id, we already added msgctxt
                        origpo._msgctxt += " ".join(origpo.getlocations())
                        # NOTE(review): thepo is appended here although origpo
                        # was the unit just marked — looks like it should be
                        # markedpos.append(origpo); verify before changing.
                        markedpos.append(thepo)
                    thepo._msgctxt += " ".join(thepo.getlocations())
                    uniqueunits.append(thepo)
            else:
                if not id:
                    if duplicatestyle == "merge":
                        addcomment(thepo)
                    else:
                        thepo._msgctxt += u" ".join(thepo.getlocations())
                id_dict[id] = thepo
                uniqueunits.append(thepo)
        self.units = uniqueunits
    def __str__(self):
        """Convert to a string. double check that unicode is handled somehow here"""
        self._cpo_store = cpo.pofile(encoding=self._encoding, noheader=True)
        try:
            self._build_cpo_from_self()
        except UnicodeEncodeError, e:
            # Fall back to UTF-8 when the declared charset cannot represent
            # all the translated text, and record that in the header.
            self._encoding = "utf-8"
            self.updateheader(add=True, Content_Type="text/plain; charset=UTF-8")
            self._build_cpo_from_self()
        output = str(self._cpo_store)
        del self._cpo_store
        return output
|
mozilla/verbatim
|
vendor/lib/python/translate/storage/fpo.py
|
Python
|
gpl-2.0
| 22,052
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------
#-- Servo class
#-- Juan Gonzalez-Gomez (obijuan). May-2013
#-----------------------------------------------------------------
#-- Controlling the position of servos from the PC
#-- The Arduino / skymega or another arduino compatible board
#-- should have the firmware FingerServer uploaded
#-----------------------------------------------------------------
import time
class IncorrectAngle(Exception):
    """Raised when a requested servo position lies outside [-90, 90] degrees.

    BUG FIX: the class previously did not subclass Exception, so
    ``raise IncorrectAngle()`` fails under Python 3 (only BaseException
    subclasses may be raised). Existing ``except IncorrectAngle`` handlers
    keep working.
    """
    pass
class Servo(object):
    """Proxy for one hardware servo driven over a serial port.

    The serial object must provide ``write`` and ``name``; the paired
    firmware (FingerServer) receives frames of the form ``<dir><pos>\\r``.
    """
    def __init__(self, sp, dir=0):
        """Arguments: serial port object and servo number."""
        self.sp = sp        #-- Serial device
        self.dir = dir      #-- Servo number
        self._pos = 0       #-- Last commanded pos
    def __str__(self):
        str1 = "Servo: {0}\n".format(self.dir)
        str2 = "Serial port: {0}".format(self.sp.name)
        return str1 + str2
    def set_pos(self, pos):
        """Set the angular servo pos. The pos is a number in the range
        [-90, 90]; it is rounded to the nearest integer before sending.

        Raises IncorrectAngle for out-of-range values.
        """
        #-- Check that the pos is in the range [-90, 90]
        if not (-90 <= pos <= 90):
            # (The unreachable `return` that followed this raise was removed.)
            raise IncorrectAngle()
        #-- Convert the pos to an integer value
        pos = int(round(pos))
        #-- Build the frame.
        #-- BUG FIX: self.dir defaults to the integer 0, so the original
        #-- `self.dir + str(pos)` raised TypeError unless callers passed the
        #-- servo number as a string; convert explicitly (no-op for strings).
        frame = str(self.dir) + str(pos) + "\r"
        #-- Debug
        print (frame)
        #-- Send the frame
        self.sp.write(frame)
        #-- Store the current servo pos
        self._pos = pos
    @property
    def pos(self):
        """Read the current (last commanded) servo pos"""
        return self._pos
    @pos.setter
    def pos(self, value):
        """Set the servo pos (delegates to set_pos)"""
        self.set_pos(value)
|
Obijuan/protocoder-apps
|
servos/python-client/Servo.py
|
Python
|
gpl-2.0
| 1,675
|
import datetime
from datetime import timedelta, time
from django.test import TestCase
from ..libs import (
get_user_realname,
format_totaltime,
format_hours_float,
format_time,
get_localtime,
get_thismonth_1st,
export_csv_task,
)
class TestLib(TestCase):
    """Unit tests for the formatting/export helpers in ``..libs``.

    Fixes: removed a duplicated assertion in ``test_export_csv_task``
    (the ``_cols_1[6]`` check appeared twice) and dropped leftover debug
    ``print`` calls.
    """
    fixtures = ['test_views.json']
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_format_totaltime(self):
        """format_totaltime renders a timedelta as H:MM, hours unbounded."""
        # easy
        _delta = datetime.timedelta(days=0, hours=0, minutes=0, seconds=0)
        _result = format_totaltime(_delta)
        self.assertEqual(_result, '0:00')
        # min 24h
        _delta = datetime.timedelta(days=0, hours=23, minutes=59, seconds=0)
        _result = format_totaltime(_delta)
        self.assertEqual(_result, '23:59')
        # over 24h
        _delta = datetime.timedelta(days=0, hours=24, minutes=0, seconds=0)
        _result = format_totaltime(_delta)
        self.assertEqual(_result, '24:00')
        # min 100h
        _delta = datetime.timedelta(days=4, hours=3, minutes=59, seconds=0)
        _result = format_totaltime(_delta)
        self.assertEqual(_result, '99:59')
        # over100h
        _delta = datetime.timedelta(days=4, hours=4, minutes=00, seconds=0)
        _result = format_totaltime(_delta)
        self.assertEqual(_result, '100:00')
    def test_format_hours_float(self):
        """format_hours_float converts a timedelta to fractional hours."""
        # easy
        _delta = datetime.timedelta(days=0, hours=0, minutes=0, seconds=0)
        _result = format_hours_float(_delta)
        self.assertEqual(_result, 0.0)
        # 15min = a quarter
        _delta = datetime.timedelta(days=0, hours=23, minutes=15, seconds=0)
        _result = format_hours_float(_delta)
        self.assertEqual(_result, 23.25)
        # 30min = a harf
        _delta = datetime.timedelta(days=0, hours=23, minutes=30, seconds=0)
        _result = format_hours_float(_delta)
        self.assertEqual(_result, 23.5)
        # 45min = three quarters
        _delta = datetime.timedelta(days=0, hours=23, minutes=45, seconds=0)
        _result = format_hours_float(_delta)
        self.assertEqual(_result, 23.75)
        # convert to hour from days
        _delta = datetime.timedelta(days=4, hours=3, minutes=45, seconds=0)
        _result = format_hours_float(_delta)
        self.assertEqual(_result, 99.75)
    def test_get_user_realname(self):
        """get_user_realname orders first/last name by the boolean flag."""
        _s = get_user_realname('aaa', 'bbb', False)
        self.assertEqual(_s, 'aaa bbb')
        _s = get_user_realname('aaa', 'bbb', True)
        self.assertEqual(_s, 'bbb aaa')
    def test_format_time(self):
        """format_time renders a time as H:MM without a leading zero hour."""
        _t = datetime.time(hour=1, minute=5)
        _s = format_time(_t)
        self.assertEqual(_s, '1:05')
    def test_export_csv_task(self):
        """export_csv_task emits quoted CSV, optionally with a header row."""
        _datalist = [
            {
                'taskdate': datetime.date(year=2015, month=4, day=5),
                'project__name': 'projname1',
                'project__external_project__code': 'abcd',
                'task__job__name': 'jobname1',
                'task__name': 'taskname1',
                'user__first_name': 'user1_first',
                'user__last_name': 'user1_last',
                'tasktime': datetime.time(hour=1, minute=15),
                'task__userdata1': '111',
                'task__userdata2': '222',
                'task__userdata3': '333',
                'task__userdata4': '444',
                'task__userdata5': '555',
                'comment': 'this is comment.',
            },
        ]
        #
        # if _add_header is True
        #
        _add_header = True
        _data_bin = export_csv_task(_datalist, _add_header, "\n")
        self.assertIsNotNone(_data_bin)
        _data_str = _data_bin.decode('utf8')
        _rows = _data_str.split("\n")
        _cols_0 = _rows[0].split(',')
        self.assertEqual(_cols_0[0], '"date"')
        self.assertEqual(_cols_0[1], '"project"')
        self.assertEqual(_cols_0[2], '"code"')
        self.assertEqual(_cols_0[3], '"job"')
        self.assertEqual(_cols_0[4], '"task"')
        self.assertEqual(_cols_0[5], '"user"')
        self.assertEqual(_cols_0[6], '"tasktime"')
        self.assertEqual(_cols_0[7], '"task_userdata1"')
        self.assertEqual(_cols_0[8], '"task_userdata2"')
        self.assertEqual(_cols_0[9], '"task_userdata3"')
        self.assertEqual(_cols_0[10], '"task_userdata4"')
        self.assertEqual(_cols_0[11], '"task_userdata5"')
        self.assertEqual(_cols_0[12], '"comment"')
        _cols_1 = _rows[1].split(',')
        self.assertEqual(_cols_1[0], '"2015-04-05"')
        self.assertEqual(_cols_1[1], '"projname1"')
        self.assertEqual(_cols_1[2], '"abcd"')
        self.assertEqual(_cols_1[3], '"jobname1"')
        self.assertEqual(_cols_1[4], '"taskname1"')
        self.assertEqual(_cols_1[5], '"user1_first user1_last"')
        self.assertEqual(_cols_1[6], '"1:15"')
        self.assertEqual(_cols_1[7], '"111"')
        self.assertEqual(_cols_1[8], '"222"')
        self.assertEqual(_cols_1[9], '"333"')
        self.assertEqual(_cols_1[10], '"444"')
        self.assertEqual(_cols_1[11], '"555"')
        self.assertEqual(_cols_1[12], '"this is comment."')
        #
        # if _add_header is False
        #
        _add_header = False
        _data_bin = export_csv_task(_datalist, _add_header, "\n")
        self.assertIsNotNone(_data_bin)
        _data_str = _data_bin.decode('utf8')
        _rows = _data_str.split("\n")
        _cols_1 = _rows[0].split(',')
        self.assertEqual(_cols_1[0], '"2015-04-05"')
        self.assertEqual(_cols_1[1], '"projname1"')
        self.assertEqual(_cols_1[2], '"abcd"')
        self.assertEqual(_cols_1[3], '"jobname1"')
        self.assertEqual(_cols_1[4], '"taskname1"')
        self.assertEqual(_cols_1[5], '"user1_first user1_last"')
        self.assertEqual(_cols_1[6], '"1:15"')
        self.assertEqual(_cols_1[7], '"111"')
        self.assertEqual(_cols_1[8], '"222"')
        self.assertEqual(_cols_1[9], '"333"')
        self.assertEqual(_cols_1[10], '"444"')
        self.assertEqual(_cols_1[11], '"555"')
        self.assertEqual(_cols_1[12], '"this is comment."')
|
dictoss/active-task-summary
|
ats/tests/tests_libs.py
|
Python
|
bsd-2-clause
| 6,226
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""'Database' management."""
# ChestTimer, an agenda creator for GW2 chests.
# Copyright (C) 2014 Julio Biason
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
jbiason/chesttimer
|
api/chesttimer/db/__init__.py
|
Python
|
gpl-3.0
| 793
|
import boto.swf.layer2 as swf
DOMAIN = 'stackoverflow'
VERSION = '1.0'
class PrioritizingWorker(swf.ActivityWorker):
    """SWF activity worker that drains 'urgent_tasks' before 'default_tasks'.

    Python 2 code (print statement). One call to run() polls exactly one
    task list and completes at most one activity.
    """
    domain = DOMAIN
    version = VERSION
    def run(self):
        """Poll the urgent list when it has pending tasks, else the default
        list; complete the polled activity if one was received."""
        # Ask SWF how many activities are pending on the urgent list.
        urgent_task_count = swf.Domain(name=DOMAIN).count_pending_activity_tasks('urgent_tasks').get('count', 0)
        if urgent_task_count > 0:
            self.task_list = 'urgent_tasks'
        else:
            self.task_list = 'default_tasks'
        activity_task = self.poll()
        # poll() may time out with no task; only then is 'activityId' absent.
        if 'activityId' in activity_task:
            print urgent_task_count, 'urgent tasks in the queue. Executing ' + activity_task.get('activityId')
            self.complete()
        return True
|
oozie/stackoverflow
|
amazon-swf/priority_tasks/worker.py
|
Python
|
bsd-3-clause
| 675
|
#-*- encoding: utf-8 -*-
#Note: The YouTube Data API (v2) has been officially deprecated as of March 4, 2014. Please refer to our deprecation policy for more information.
#https://developers.google.com/youtube/2.0/developers_guide_protocol_api_query_parameters
try:
from urllib.request import urlopen, Request
from urllib.parse import quote_plus, quote
except ImportError:
from urllib import quote_plus, quote
from urllib2 import Request, urlopen
import logging
import sys
import xml.etree.ElementTree as et
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
class XMLScraper():
    """Downloads web resources and parses the response body as XML."""
    __logger=logging.getLogger(__name__)
    def downloadWebData(self, url):
        """Fetch *url* with a browser User-Agent and return the body as text.

        Any failure is re-raised as a bare ``Exception`` (historical API
        kept for existing callers).
        """
        data = ""
        try:
            req = Request(url)
            req.add_header('User-Agent', USER_AGENT)
            response = urlopen(req)
            if sys.version_info < (3, 0):
                data = response.read()
            else:
                # BUG FIX: response.readall() is not part of the stable
                # Python 3 HTTPResponse API; read() is the portable call.
                data = response.read().decode('utf-8')
            response.close()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            raise Exception()
        return data
    def downloadXML(self, url):
        """Download *url* and return the parsed ElementTree root element."""
        try:
            xmlString = self.downloadWebData(url)
            tree = et.fromstring(xmlString)
            # BUG FIX: et.dump() prints to stdout and returns None, so the
            # original logged the literal string "None"; tostring() returns
            # the serialised document instead.
            self.__logger.debug(et.tostring(tree))
            return tree
        except Exception:
            raise Exception()
class Youtube():
    """Thin client for the (deprecated) YouTube GData v2 feeds.

    Builds feed URLs from the URLS templates and returns parsed XML trees
    via an XMLScraper instance.
    """
    __scraper = None
    __logger=logging.getLogger(__name__)
    # XML namespaces used by the GData feeds (for callers doing lookups).
    NAMESPACES = {
        'Atom': 'http://www.w3.org/2005/Atom',
        'openSearch': 'http://a9.com/-/spec/opensearchrss/1.0/',
        'yt': 'http://gdata.youtube.com/schemas/2007'}
    # URL templates; {0[0]}=query/name, {0[1]}=start index, {0[2]}=max results.
    URLS = {
        'uploads' : 'https://gdata.youtube.com/feeds/api/users/{0[0]}/uploads?start-index={0[1]}&max-results={0[2]}',
        'channelSearch': 'https://gdata.youtube.com/feeds/api/channels?q="{0[0]}"&start-index={0[1]}&max-results={0[2]}&v=2',
        'videoSearch': 'https://gdata.youtube.com/feeds/api/videos?q="{0[0]}"&start-index={0[1]}&max-results={0[2]}&v=2'}
    def __init__(self):
        self.__scraper=XMLScraper()
    def __prepareURL(self, url_key, replacement_list):
        """Fill the URLS template *url_key* and percent-encode the result."""
        url = self.URLS[url_key].format(replacement_list)
        # Keep characters that are meaningful in the query string unescaped.
        url = quote(url, safe="%/:=&?~#+!$,;'@()*[]")
        self.__logger.debug('prepareURL: ' + url)
        return url
    '''
    list all Channel videos
    @param channelName name of the channel
    @param offset where to start(smallest is 1)
    @param limit max amount of videos to pull, max limit is 25
    '''
    def listChannelVideos(self, channelName, offset=1, limit=25):
        url = self.__prepareURL('uploads', (channelName, offset, limit))
        return self.__scraper.downloadXML(url)
    '''
    search for Channel
    @param channelName name of the channel
    @param offset where to start(smallest is 1)
    @param limit max amount of videos to pull, max limit is 25
    '''
    def searchChannel(self, channelName, offset=1, limit=25):
        url = self.__prepareURL('channelSearch', (channelName, offset, limit))
        return self.__scraper.downloadXML(url)
    '''
    search for Channel
    @param videoName name of the video
    @param offset where to start(smallest is 1)
    @param limit max amount of videos to pull, max limit is 25
    '''
    def searchVideo(self, videoName, offset=1, limit=25):
        url = self.__prepareURL('videoSearch', (videoName, offset, limit))
        return self.__scraper.downloadXML(url)
|
ingwinlu/simpleMediaCenter
|
simpleMediaCenter/helpers/youtube/__init__.py
|
Python
|
gpl-2.0
| 3,580
|
from django.contrib import admin
from django.urls import include, path
# Root URL configuration: the Django admin plus the tracks app at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('tracks.urls'))
]
|
jochenklar/bike
|
config/urls.py
|
Python
|
apache-2.0
| 164
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""
The pex.pex utility builds PEX environments and .pex files specified by
sources, requirements and their dependencies.
"""
from __future__ import absolute_import, print_function
import functools
import os
import shutil
import sys
from optparse import OptionGroup, OptionParser, OptionValueError
from textwrap import TextWrapper
from pex.archiver import Archiver
from pex.base import maybe_requirement
from pex.common import die, safe_delete, safe_mkdir, safe_mkdtemp
from pex.crawler import Crawler
from pex.fetcher import Fetcher, PyPIFetcher
from pex.http import Context
from pex.installer import EggInstaller
from pex.interpreter import PythonInterpreter
from pex.iterator import Iterator
from pex.package import EggPackage, SourcePackage
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.platforms import Platform
from pex.requirements import requirements_from_file
from pex.resolvable import Resolvable
from pex.resolver import CachingResolver, Resolver, Unsatisfiable
from pex.resolver_options import ResolverOptionsBuilder
from pex.tracer import TRACER
from pex.variables import ENV, Variables
from pex.version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT, __version__
CANNOT_DISTILL = 101
CANNOT_SETUP_INTERPRETER = 102
INVALID_OPTIONS = 103
INVALID_ENTRY_POINT = 104
class Logger(object):
    """Swappable verbosity-gated logger used via the module-level ``log``."""
    def _default_logger(self, msg, v):
        # Default sink: print to stderr only when verbosity is truthy.
        if v:
            print(msg, file=sys.stderr)
    # Class attribute referencing the method above. Accessed through an
    # instance it binds self, so self._LOGGER(msg, v) works; set_logger
    # replaces it with an *instance* attribute that is called unbound.
    _LOGGER = _default_logger
    def __call__(self, msg, v):
        self._LOGGER(msg, v)
    def set_logger(self, logger_callback):
        # logger_callback must accept (msg, v).
        self._LOGGER = logger_callback
log = Logger()
def parse_bool(option, opt_str, _, parser):
    """optparse callback: store True, or False when invoked via a --no-* flag."""
    enabled = not opt_str.startswith('--no')
    setattr(parser.values, option.dest, enabled)
def increment_verbosity(option, opt_str, _, parser):
    """optparse callback: bump the verbosity counter by one (missing -> 0)."""
    current = getattr(parser.values, option.dest, 0)
    setattr(parser.values, option.dest, current + 1)
def process_disable_cache(option, option_str, option_value, parser):
    """optparse callback for --disable-cache: clear the cache dir option."""
    setattr(parser.values, option.dest, None)
def process_pypi_option(option, option_str, option_value, parser, builder):
    """optparse callback for --pypi/--no-pypi/--no-index.

    --no-* wipes all accumulated indices; otherwise PyPI is added (once)
    to both the parsed option value and the resolver builder.
    """
    if option_str.startswith('--no'):
        setattr(parser.values, option.dest, [])
        builder.clear_indices()
        return
    indices = getattr(parser.values, option.dest, [])
    pypi = PyPIFetcher()
    if pypi not in indices:
        indices.append(pypi)
    setattr(parser.values, option.dest, indices)
    builder.add_index(PyPIFetcher.PYPI_BASE)
def process_find_links(option, option_str, option_value, parser, builder):
    """optparse callback: register an extra find-links repository (path/URL)."""
    known = getattr(parser.values, option.dest, [])
    candidate = Fetcher([option_value])
    if candidate not in known:
        known.append(candidate)
    setattr(parser.values, option.dest, known)
    builder.add_repository(option_value)
def process_index_url(option, option_str, option_value, parser, builder):
    """optparse callback: register an additional cheeseshop index URL."""
    known = getattr(parser.values, option.dest, [])
    candidate = PyPIFetcher(option_value)
    if candidate not in known:
        known.append(candidate)
    setattr(parser.values, option.dest, known)
    builder.add_index(option_value)
def process_prereleases(option, option_str, option_value, parser, builder):
if option_str == '--pre':
builder.allow_prereleases(True)
elif option_str == '--no-pre':
builder.allow_prereleases(False)
else:
raise OptionValueError
def process_precedence(option, option_str, option_value, parser, builder):
if option_str == '--build':
builder.allow_builds()
elif option_str == '--no-build':
builder.no_allow_builds()
elif option_str == '--wheel':
setattr(parser.values, option.dest, True)
builder.use_wheel()
elif option_str in ('--no-wheel', '--no-use-wheel'):
setattr(parser.values, option.dest, False)
builder.no_use_wheel()
else:
raise OptionValueError
def print_variable_help(option, option_str, option_value, parser):
    """optparse callback: print help for every PEX_* runtime variable and exit."""
    for variable_name, variable_type, variable_help in Variables.iter_help():
        print('\n%s: %s\n' % (variable_name, variable_type))
        # Indent the wrapped help text four spaces under the variable name.
        for line in TextWrapper(initial_indent=' ' * 4, subsequent_indent=' ' * 4).wrap(variable_help):
            print(line)
    # Help-style option: terminate the process after printing.
    sys.exit(0)
def configure_clp_pex_resolution(parser, builder):
    """Add the 'Resolver options' group to *parser*.

    Most options use optparse callbacks that mirror the chosen values into
    *builder* (a ResolverOptionsBuilder) as well as parser.values.
    """
    group = OptionGroup(
        parser,
        'Resolver options',
        'Tailor how to find, resolve and translate the packages that get put into the PEX '
        'environment.')
    group.add_option(
        '--pypi', '--no-pypi', '--no-index',
        action='callback',
        dest='repos',
        callback=process_pypi_option,
        callback_args=(builder,),
        help='Whether to use pypi to resolve dependencies; Default: use pypi')
    group.add_option(
        '-f', '--find-links', '--repo',
        metavar='PATH/URL',
        action='callback',
        dest='repos',
        callback=process_find_links,
        callback_args=(builder,),
        type=str,
        help='Additional repository path (directory or URL) to look for requirements.')
    group.add_option(
        '-i', '--index', '--index-url',
        metavar='URL',
        action='callback',
        dest='repos',
        callback=process_index_url,
        callback_args=(builder,),
        type=str,
        help='Additional cheeseshop indices to use to satisfy requirements.')
    group.add_option(
        '--pre', '--no-pre',
        dest='allow_prereleases',
        default=None,
        action='callback',
        callback=process_prereleases,
        callback_args=(builder,),
        help='Whether to include pre-release and development versions of requirements; '
             'Default: only stable versions are used, unless explicitly requested')
    group.add_option(
        '--disable-cache',
        action='callback',
        dest='cache_dir',
        callback=process_disable_cache,
        help='Disable caching in the pex tool entirely.')
    group.add_option(
        '--cache-dir',
        dest='cache_dir',
        default='{pex_root}/build',
        help='The local cache directory to use for speeding up requirement '
             'lookups. [Default: ~/.pex/build]')
    group.add_option(
        '--cache-ttl',
        dest='cache_ttl',
        type=int,
        default=3600,
        help='The cache TTL to use for inexact requirement specifications.')
    group.add_option(
        '--wheel', '--no-wheel', '--no-use-wheel',
        dest='use_wheel',
        default=True,
        action='callback',
        callback=process_precedence,
        callback_args=(builder,),
        help='Whether to allow wheel distributions; Default: allow wheels')
    group.add_option(
        '--build', '--no-build',
        action='callback',
        callback=process_precedence,
        callback_args=(builder,),
        help='Whether to allow building of distributions from source; Default: allow builds')
    # Set the pex tool to fetch from PyPI by default if nothing is specified.
    parser.set_default('repos', [PyPIFetcher()])
    parser.add_option_group(group)
def configure_clp_pex_options(parser):
    """Add the 'PEX output options' group (behavior of the emitted .pex file)."""
    group = OptionGroup(
        parser,
        'PEX output options',
        'Tailor the behavior of the emitted .pex file if -o is specified.')
    group.add_option(
        '--zip-safe', '--not-zip-safe',
        dest='zip_safe',
        default=True,
        action='callback',
        callback=parse_bool,
        help='Whether or not the sources in the pex file are zip safe. If they are '
             'not zip safe, they will be written to disk prior to execution; '
             'Default: zip safe.')
    group.add_option(
        '--always-write-cache',
        dest='always_write_cache',
        default=False,
        action='store_true',
        help='Always write the internally cached distributions to disk prior to invoking '
             'the pex source code. This can use less memory in RAM constrained '
             'environments. [Default: %default]')
    group.add_option(
        '--ignore-errors',
        dest='ignore_errors',
        default=False,
        action='store_true',
        help='Ignore run-time requirement resolution errors when invoking the pex. '
             '[Default: %default]')
    group.add_option(
        '--inherit-path',
        dest='inherit_path',
        default=False,
        action='store_true',
        help='Inherit the contents of sys.path (including site-packages) running the pex. '
             '[Default: %default]')
    parser.add_option_group(group)
def configure_clp_pex_environment(parser):
    """Register options selecting the interpreter/platform the PEX targets."""
    group = OptionGroup(
        parser,
        'PEX environment options',
        'Tailor the interpreter and platform targets for the PEX environment.')

    group.add_option(
        '--python',
        dest='python',
        default=None,
        help='The Python interpreter to use to build the pex.  Either specify an explicit '
             'path to an interpreter, or specify a binary accessible on $PATH. '
             'Default: Use current interpreter.')

    group.add_option(
        '--python-shebang',
        dest='python_shebang',
        default=None,
        help='The exact shebang (#!...) line to add at the top of the PEX file minus the '
             '#!.  This overrides the default behavior, which picks an environment python '
             'interpreter compatible with the one used to build the PEX file.')

    group.add_option(
        '--platform',
        dest='platform',
        default=Platform.current(),
        help='The platform for which to build the PEX.  Default: %default')

    group.add_option(
        '--interpreter-cache-dir',
        dest='interpreter_cache_dir',
        # The {pex_root} placeholder is expanded later by make_relative_to_root.
        default='{pex_root}/interpreters',
        help='The interpreter cache to use for keeping track of interpreter dependencies '
             'for the pex tool. [Default: ~/.pex/interpreters]')

    parser.add_option_group(group)
def configure_clp_pex_entry_points(parser):
    """Register options choosing what the PEX invokes when executed."""
    group = OptionGroup(
        parser,
        'PEX entry point options',
        'Specify what target/module the PEX should invoke if any.')

    group.add_option(
        '-m', '-e', '--entry-point',
        dest='entry_point',
        metavar='MODULE[:SYMBOL]',
        default=None,
        help='Set the entry point to module or module:symbol.  If just specifying module, pex '
             'behaves like python -m, e.g. python -m SimpleHTTPServer.  If specifying '
             'module:symbol, pex imports that symbol and invokes it as if it were main.')

    group.add_option(
        '-c', '--script', '--console-script',
        dest='script',
        default=None,
        metavar='SCRIPT_NAME',
        help='Set the entry point as to the script or console_script as defined by a any of the '
             'distributions in the pex.  For example: "pex -c fab fabric" or "pex -c mturk boto".')

    parser.add_option_group(group)
def configure_clp():
    """Build the top-level option parser for the pex tool.

    :returns: a tuple ``(parser, resolver_options_builder)`` where the builder
        accumulates resolver-related settings registered by the resolution
        option group.
    """
    usage = (
        '%prog [-o OUTPUT.PEX] [options] [-- arg1 arg2 ...]\n\n'
        '%prog builds a PEX (Python Executable) file based on the given specifications: '
        'sources, requirements, their dependencies and other options.')

    parser = OptionParser(usage=usage, version='%prog {0}'.format(__version__))
    resolver_options_builder = ResolverOptionsBuilder()
    configure_clp_pex_resolution(parser, resolver_options_builder)
    configure_clp_pex_options(parser)
    configure_clp_pex_environment(parser)
    configure_clp_pex_entry_points(parser)

    parser.add_option(
        '-o', '--output-file',
        dest='pex_name',
        default=None,
        # Fixed typo in user-facing help text: "Omiting" -> "Omitting".
        help='The name of the generated .pex file: Omitting this will run PEX '
             'immediately and not save it to a file.')

    parser.add_option(
        '-r', '--requirement',
        dest='requirement_files',
        metavar='FILE',
        default=[],
        type=str,
        action='append',
        help='Add requirements from the given requirements file.  This option can be used multiple '
             'times.')

    parser.add_option(
        '--constraints',
        dest='constraint_files',
        metavar='FILE',
        default=[],
        type=str,
        action='append',
        help='Add constraints from the given constraints file.  This option can be used multiple '
             'times.')

    parser.add_option(
        '-v',
        dest='verbosity',
        default=0,
        action='callback',
        # Each -v bumps options.verbosity by one.
        callback=increment_verbosity,
        help='Turn on logging verbosity, may be specified multiple times.')

    parser.add_option(
        '--pex-root',
        dest='pex_root',
        default=None,
        help='Specify the pex root used in this invocation of pex. [Default: ~/.pex]'
    )

    parser.add_option(
        '--help-variables',
        action='callback',
        callback=print_variable_help,
        help='Print out help about the various environment variables used to change the behavior of '
             'a running PEX file.')

    return parser, resolver_options_builder
def _safe_link(src, dst):
    """Point a symlink at *src* from *dst*, replacing whatever was there.

    A missing destination (or any other failure to unlink) is tolerated;
    the symlink creation itself is allowed to raise.
    """
    try:
        os.unlink(dst)  # clear a stale link/file; absence is fine
    except OSError:
        pass
    os.symlink(src, dst)
def _resolve_and_link_interpreter(requirement, fetchers, target_link, installer_provider):
    """Fetch, build and cache an egg satisfying *requirement*.

    On success the built egg is moved next to *target_link* and symlinked
    there, and an ``EggPackage`` is returned.  Returns ``None`` when no
    suitable source package can be found.
    """
    # Short-circuit if there is a local copy
    if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
        egg = EggPackage(os.path.realpath(target_link))
        if egg.satisfies(requirement):
            return egg

    context = Context.get()
    iterator = Iterator(fetchers=fetchers, crawler=Crawler(context))
    # Only source packages are considered; they are built locally below.
    links = [link for link in iterator.iter(requirement) if isinstance(link, SourcePackage)]

    with TRACER.timed('Interpreter cache resolving %s' % requirement, V=2):
        for link in links:
            with TRACER.timed('Fetching %s' % link, V=3):
                sdist = context.fetch(link)

            with TRACER.timed('Installing %s' % link, V=3):
                installer = installer_provider(sdist)
                dist_location = installer.bdist()
                target_location = os.path.join(
                    os.path.dirname(target_link), os.path.basename(dist_location))
                shutil.move(dist_location, target_location)
                _safe_link(target_location, target_link)

            # First successfully built candidate wins.
            return EggPackage(target_location)
def resolve_interpreter(cache, fetchers, interpreter, requirement):
    """Resolve an interpreter with a specific requirement.

    Given a :class:`PythonInterpreter` and a requirement, return an
    interpreter with the capability of resolving that requirement or
    ``None`` if it's not possible to install a suitable requirement.
    """
    requirement = maybe_requirement(requirement)

    # short circuit: nothing to do if the interpreter already satisfies it
    if interpreter.satisfies([requirement]):
        return interpreter

    def installer_provider(sdist):
        # Build an egg from the sdist; setuptools itself must be installed
        # non-strictly since it is the thing being bootstrapped.
        return EggInstaller(
            Archiver.unpack(sdist),
            strict=requirement.key != 'setuptools',
            interpreter=interpreter)

    interpreter_dir = os.path.join(cache, str(interpreter.identity))
    safe_mkdir(interpreter_dir)

    egg = _resolve_and_link_interpreter(
        requirement,
        fetchers,
        os.path.join(interpreter_dir, requirement.key),
        installer_provider)

    # Implicitly returns None when the egg could not be resolved.
    if egg:
        return interpreter.with_extra(egg.name, egg.raw_version, egg.path)
def interpreter_from_options(options):
    """Select (and, if needed, bootstrap) the interpreter used to build.

    Resolves setuptools -- and wheel when wheels are enabled -- into the
    interpreter's extras.  Returns ``None`` if bootstrapping fails.
    """
    interpreter = None

    if options.python:
        if os.path.exists(options.python):
            interpreter = PythonInterpreter.from_binary(options.python)
        else:
            # Not an existing path: look the name up on $PATH instead.
            interpreter = PythonInterpreter.from_env(options.python)
        if interpreter is None:
            die('Failed to find interpreter: %s' % options.python)
    else:
        interpreter = PythonInterpreter.get()

    with TRACER.timed('Setting up interpreter %s' % interpreter.binary, V=2):
        resolve = functools.partial(resolve_interpreter, options.interpreter_cache_dir, options.repos)

        # resolve setuptools
        interpreter = resolve(interpreter, SETUPTOOLS_REQUIREMENT)

        # possibly resolve wheel
        if interpreter and options.use_wheel:
            interpreter = resolve(interpreter, WHEEL_REQUIREMENT)

        return interpreter
def build_pex(args, options, resolver_option_builder):
    """Resolve all requirements and assemble a configured PEXBuilder.

    :param args: positional requirement specifications from the command line.
    :param options: parsed command line options.
    :param resolver_option_builder: collects resolver settings registered
        by the resolution option group.
    :returns: a ready-to-freeze :class:`PEXBuilder`.
    """
    with TRACER.timed('Resolving interpreter', V=2):
        interpreter = interpreter_from_options(options)

    if interpreter is None:
        die('Could not find compatible interpreter', CANNOT_SETUP_INTERPRETER)

    pex_builder = PEXBuilder(path=safe_mkdtemp(), interpreter=interpreter)

    # Propagate runtime behavior flags into the PEX metadata.
    pex_info = pex_builder.info
    pex_info.zip_safe = options.zip_safe
    pex_info.always_write_cache = options.always_write_cache
    pex_info.ignore_errors = options.ignore_errors
    pex_info.inherit_path = options.inherit_path

    resolvables = [Resolvable.get(arg, resolver_option_builder) for arg in args]

    for requirements_txt in options.requirement_files:
        resolvables.extend(requirements_from_file(requirements_txt, resolver_option_builder))

    # pip states the constraints format is identical to requirements
    # https://pip.pypa.io/en/stable/user_guide/#constraints-files
    for constraints_txt in options.constraint_files:
        constraints = []
        for r in requirements_from_file(constraints_txt, resolver_option_builder):
            r.is_constraint = True
            constraints.append(r)
        resolvables.extend(constraints)

    resolver_kwargs = dict(interpreter=interpreter, platform=options.platform)

    if options.cache_dir:
        resolver = CachingResolver(options.cache_dir, options.cache_ttl, **resolver_kwargs)
    else:
        resolver = Resolver(**resolver_kwargs)

    with TRACER.timed('Resolving distributions'):
        try:
            resolveds = resolver.resolve(resolvables)
        except Unsatisfiable as e:
            die(e)

        for dist in resolveds:
            log('  %s' % dist, v=options.verbosity)
            pex_builder.add_distribution(dist)
            pex_builder.add_requirement(dist.as_requirement())

    # Entry point and script are mutually exclusive ways to launch the PEX.
    if options.entry_point and options.script:
        die('Must specify at most one entry point or script.', INVALID_OPTIONS)

    if options.entry_point:
        pex_builder.set_entry_point(options.entry_point)
    elif options.script:
        pex_builder.set_script(options.script)

    if options.python_shebang:
        pex_builder.set_shebang(options.python_shebang)

    return pex_builder
def make_relative_to_root(path):
    """Expand the ``{pex_root}`` placeholder in *path* and normalize it."""
    expanded = path.format(pex_root=ENV.PEX_ROOT)
    return os.path.normpath(expanded)
def main(args=None):
    """Command line entry point for the pex tool.

    Either writes the built PEX to ``-o OUTPUT.PEX`` and returns 0, or runs
    it in place with everything after ``--`` as its arguments (in which
    case this call does not return; it exits with the PEX's exit code).
    """
    args = args or sys.argv[1:]
    parser, resolver_options_builder = configure_clp()

    try:
        # Everything after a literal '--' is forwarded to the built PEX.
        separator = args.index('--')
        args, cmdline = args[:separator], args[separator + 1:]
    except ValueError:
        args, cmdline = args, []

    options, reqs = parser.parse_args(args=args)
    if options.pex_root:
        ENV.set('PEX_ROOT', options.pex_root)
    else:
        options.pex_root = ENV.PEX_ROOT  # If option not specified fallback to env variable.

    # Don't alter cache if it is disabled.
    if options.cache_dir:
        options.cache_dir = make_relative_to_root(options.cache_dir)
    options.interpreter_cache_dir = make_relative_to_root(options.interpreter_cache_dir)

    with ENV.patch(PEX_VERBOSE=str(options.verbosity)):
        with TRACER.timed('Building pex'):
            pex_builder = build_pex(reqs, options, resolver_options_builder)

        if options.pex_name is not None:
            log('Saving PEX file to %s' % options.pex_name, v=options.verbosity)
            # Build into a temp name then rename for an atomic replace.
            tmp_name = options.pex_name + '~'
            safe_delete(tmp_name)
            pex_builder.build(tmp_name)
            os.rename(tmp_name, options.pex_name)
            return 0

        if options.platform != Platform.current():
            log('WARNING: attempting to run PEX with differing platform!')

        pex_builder.freeze()

        log('Running PEX file at %s with args %s' % (pex_builder.path(), cmdline), v=options.verbosity)
        pex = PEX(pex_builder.path(), interpreter=pex_builder.interpreter)
        sys.exit(pex.run(args=list(cmdline)))


if __name__ == '__main__':
    main()
|
snyaggarwal/pex
|
pex/bin/pex.py
|
Python
|
apache-2.0
| 19,324
|
# Scenario: remove (unstore) a previously tokenized card via the Balanced API.
import balanced

# Configure the client with a test-marketplace API key.
balanced.configure('ak-test-1o9QKwUCrwstHWO5sGxICtIJdQXFTjnrV')

# Fetch the stored card by its URI, then delete it from the vault.
card = balanced.Card.fetch('/cards/CC4OTo7bbk25ZWmhdQCdXkPu')
card.unstore()
|
trenton42/txbalanced
|
scenarios/card_delete/executable.py
|
Python
|
mit
| 158
|
# Copyright 2005 Joe Wreschnig, Michael Urman
# 2021 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
from quodlibet.util import escape
from senf import fsn2text, path2fsn
from quodlibet import _
from quodlibet import util
from quodlibet.qltk.icons import Icons
from quodlibet.qltk import get_top_parent
from quodlibet.qltk.window import Dialog
class Message(Gtk.MessageDialog, Dialog):
    """A self-destroying message dialog.

    Renders *title* and *description* as Pango markup and defaults to a
    single OK button.  ``run()`` destroys the dialog afterwards unless
    told otherwise.
    """

    def __init__(self, kind, parent, title, description, buttons=Gtk.ButtonsType.OK,
                 escape_desc=True):
        body = escape(description) if escape_desc else description
        markup = ("<span weight='bold' size='larger'>%s</span>\n\n%s"
                  % (escape(title), body))
        super().__init__(
            transient_for=get_top_parent(parent), modal=True,
            destroy_with_parent=True, message_type=kind, buttons=buttons)
        self.set_markup(markup)

    def run(self, destroy=True):
        """Show the dialog modally and return the response id."""
        response = super().run()
        if destroy:
            self.destroy()
        return response
class CancelRevertSave(Gtk.MessageDialog, Dialog):
    """Warning dialog offering Save / Cancel / Revert for unsaved tag edits.

    ``run()`` returns the chosen response (YES = save, NO = revert,
    CANCEL = do nothing) and always destroys the dialog.
    """

    def __init__(self, parent):
        title = _("Discard tag changes?")
        description = _("Tags have been changed but not saved. Save these "
                        "files, or revert and discard changes?")
        # NOTE(review): unlike Message, title/description are not
        # markup-escaped here -- translations must not contain '<'; confirm.
        text = ("<span weight='bold' size='larger'>%s</span>\n\n%s"
                % (title, description))
        parent = get_top_parent(parent)
        super().__init__(
            transient_for=parent, flags=0,
            message_type=Gtk.MessageType.WARNING,
            buttons=Gtk.ButtonsType.NONE)
        self.add_icon_button(_("_Save"), Icons.DOCUMENT_SAVE,
                             Gtk.ResponseType.YES)
        self.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
        self.add_icon_button(_("_Revert"), Icons.DOCUMENT_REVERT,
                             Gtk.ResponseType.NO)
        # Revert is the default action.
        self.set_default_response(Gtk.ResponseType.NO)
        self.set_markup(text)

    def run(self):
        """Show the dialog modally; destroy it and return the response."""
        resp = super().run()
        self.destroy()
        return resp
class ErrorMessage(Message):
    """Like Message, but uses an error-indicating picture.

    Accepts the same arguments as :class:`Message` minus the leading
    message-type, which is fixed to ``Gtk.MessageType.ERROR``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(
            Gtk.MessageType.ERROR, *args, **kwargs)
class WarningMessage(Message):
    """Like Message, but uses a warning-indicating picture.

    Accepts the same arguments as :class:`Message` minus the leading
    message-type, which is fixed to ``Gtk.MessageType.WARNING``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(
            Gtk.MessageType.WARNING, *args, **kwargs)
class ConfirmationPrompt(WarningMessage):
    """Dialog to confirm actions, given a parent, title, description, and
    OK-button text"""

    # Response id returned when the user confirms the action.
    RESPONSE_INVOKE = Gtk.ResponseType.YES

    def __init__(self, parent, title, description, ok_button_text,
                 ok_button_icon=Icons.SYSTEM_RUN):
        super().__init__(parent, title, description, buttons=Gtk.ButtonsType.NONE)

        self.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
        self.add_icon_button(ok_button_text, ok_button_icon, self.RESPONSE_INVOKE)
        # Default to the safe (cancel) choice.
        self.set_default_response(Gtk.ResponseType.CANCEL)
class ConfirmFileReplace(WarningMessage):
    """Ask whether an existing file at *path* should be overwritten."""

    # Response id returned when the user chooses to replace the file.
    RESPONSE_REPLACE = 1

    def __init__(self, parent, path):
        title = _("File exists")
        # The file name is escaped and bolded for markup rendering.
        fn_format = "<b>%s</b>" % util.escape(fsn2text(path2fsn(path)))
        description = _("Replace %(file-name)s?") % {"file-name": fn_format}

        super().__init__(
            parent, title, description, buttons=Gtk.ButtonsType.NONE)

        self.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
        self.add_icon_button(_("_Replace File"), Icons.DOCUMENT_SAVE,
                             self.RESPONSE_REPLACE)
        self.set_default_response(Gtk.ResponseType.CANCEL)
|
quodlibet/quodlibet
|
quodlibet/qltk/msg.py
|
Python
|
gpl-2.0
| 4,091
|
# Bob build tool
# Copyright (C) 2021 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from ...errors import BuildError
from ...tty import Warn, WarnOnce
from ...utils import asHexStr, hashDirectoryWithSize, isWindows
import errno
import os, os.path
import json
import shutil
import sys
import tempfile
# Module-level warning singletons; WarnOnce variants fire at most once
# per process, Warn fires on every call.
warnRepoSize = WarnOnce("The shared repository is over its quota. Run 'bob clean --shared' to free disk space!")
warnGcDidNotHelp = WarnOnce("The automatic garbage collection of the shared repository was unable to free enough space. Run 'bob clean --shared' manually.")
warnNoShareConfigured = Warn("No shared directory configured! Nothing cleaned.")
# Platform abstraction for advisory file locking: Windows has no flock(),
# so a fixed 1 MiB region starting at the current offset is locked instead.
if sys.platform == 'win32':
    import msvcrt

    def lockFile(fd, exclusive):
        # LK_LOCK blocks for an exclusive lock; LK_RLCK for a "read" lock.
        msvcrt.locking(fd.fileno(), msvcrt.LK_LOCK if exclusive else msvcrt.LK_RLCK, 0x100000)

    def unlockFile(fd):
        # Unlocking must cover the same region, starting at offset 0.
        fd.seek(0)
        msvcrt.locking(fd.fileno(), msvcrt.LK_UNLCK, 0x100000)
else:
    import fcntl

    def lockFile(fd, exclusive):
        # Whole-file advisory lock; shared unless exclusive was requested.
        fcntl.flock(fd, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def unlockFile(fd):
        fcntl.flock(fd, fcntl.LOCK_UN)

# The first two generations were used by the Jenkins backend.
SHARED_GENERATION = '-3'
class OpenLocked:
    """Context manager: open a file and hold an advisory lock on it.

    The lock (exclusive or shared, see *exclusive*) is acquired on entry
    and released -- together with the file handle -- on exit.  ``__enter__``
    returns the open file object.
    """

    def __init__(self, fileName, mode, exclusive):
        self.fileName = fileName
        self.mode = mode
        self.exclusive = exclusive

    def __enter__(self):
        self.fd = open(self.fileName, self.mode)
        try:
            lockFile(self.fd, self.exclusive)
        except:
            # Deliberately bare: close the handle on *any* failure
            # (including KeyboardInterrupt) before re-raising.
            self.fd.close()
            raise
        return self.fd

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            unlockFile(self.fd)
        finally:
            self.fd.close()
def sameWorkspace(link, sharePath):
    """Is the workspace shared and points to sharePath?

    Depending on the OS the shared package could be a symlink to the shared
    place or have a place-holder file with the path. The workspace could also
    be something completely different by now.
    """
    try:
        if os.path.islink(link):
            dst = os.readlink(link)
        elif os.path.isfile(link):
            # Place-holder file variant (e.g. on Windows): the file body
            # contains the destination path.
            with open(link) as f:
                dst = f.read(0x10000)
        else:
            return False
        return os.path.samefile(dst, sharePath)
    except OSError as e:
        raise BuildError("Error inspecting workspace: " + str(e))
def checkUnused(pkgMeta, pkgPath):
    """Return True when no recorded user still shares this package's workspace."""
    workspace = os.path.join(pkgPath, "workspace")
    users = pkgMeta.get("users", [])
    return not any(sameWorkspace(user, workspace) for user in users)
class NullShare:
    """No-op share backend used when no shared directory is configured.

    Mirrors the LocalShare interface but never installs or reuses packages.
    """

    def __init__(self):
        self.quota = 0

    def remoteName(self, buildId):
        return ""

    def canInstall(self):
        return False

    def useSharedPackage(self, workspace, buildId):
        # (path, hash) -- nothing is shared.
        return None, None

    def installSharedPackage(self, workspace, buildId, sharedHash, mayMove):
        # (path, installed) -- never installs anything.
        return None, False

    def gc(self, pruneUsed, pruneUnused, dryRun=False, progress=lambda x: None, newPkg=None):
        # Garbage collecting a non-existent share only warns the user.
        warnNoShareConfigured.warn()
        return None
class LocalShare:
    """Shared package repository on the local file system.

    Built artifacts are stored under a configurable path, sharded by
    build-id, and may be subject to a size quota with optional automatic
    garbage collection.  Concurrent access is serialized by file locks on
    the repository index (``repo.json``) and per-package ``pkg.json`` files.
    """

    # Suffix -> byte factor for parsing human-readable quota strings.
    UNITS = [("KiB", 1024**1), ("MiB", 1024**2), ("GiB", 1024**3), ("TiB", 1024**4),
             ("K", 1024**1), ("M", 1024**2), ("G", 1024**3), ("T", 1024**4),
             ("KB", 1000**1), ("MB", 1000**2), ("GB", 1000**3), ("TB", 1000**4)]

    def __init__(self, spec):
        self.__path = os.path.abspath(os.path.expanduser(spec['path']))
        quota = spec.get('quota')
        if isinstance(quota, str):
            # Parse e.g. "5GiB"; a string with no known suffix is taken
            # as a plain byte count.
            for ending, factor in self.UNITS:
                if quota.endswith(ending): break
            else:
                ending = None
                factor = 1
            if ending is not None:
                quota = quota[:-len(ending)]
            quota = int(quota) * factor
        self.__quota = quota
        self.__autoClean = spec.get('autoClean', True)

    def __buildPath(self, buildId):
        # Shard packages two levels deep by hex-prefix of the build-id.
        buildId = asHexStr(buildId) + SHARED_GENERATION
        return os.path.join(*[self.__path, buildId[0:2], buildId[2:4], buildId[4:]])

    def __addPackage(self, buildId, size):
        """Record *buildId*/*size* in repo.json; return the repo's total size."""
        def update(f):
            meta = json.load(f)
            meta.setdefault("pkgs", {})[asHexStr(buildId)] = size
            f.seek(0)
            f.truncate()
            json.dump(meta, f)
            ret = 0
            for v in meta["pkgs"].values(): ret += v
            return ret

        fn = os.path.join(self.__path, "repo.json")
        try:
            try:
                # Usual case: update with lock
                with OpenLocked(fn, "r+", True) as f:
                    return update(f)
            except FileNotFoundError:
                # Unusual case: does not exist yet -> create atomically.
                try:
                    with OpenLocked(fn, "x", True) as f:
                        json.dump({"pkgs" : {asHexStr(buildId) : size}}, f)
                        return size
                except FileExistsError:
                    # Almost impossible case: lost creation race -> update
                    with OpenLocked(fn, "r+", True) as f:
                        return update(f)
        except OSError as e:
            raise BuildError("Error updating shared repo: "+str(e))

    def remoteName(self, buildId):
        """Return the on-disk location a build-id would be shared at."""
        return self.__buildPath(buildId)

    def canInstall(self):
        return True

    def useSharedPackage(self, workspace, buildId):
        """Try to use an already-shared package for *workspace*.

        Returns ``(path, hash)`` of the shared package, or ``(None, None)``
        if it is not (or no longer) available.  Registers *workspace* as a
        user of the package as a side effect.
        """
        path = self.__buildPath(buildId)
        workspace = os.path.abspath(workspace)
        try:
            # Shared lock on the repo index prevents a concurrent gc from
            # removing packages while we inspect one.
            with OpenLocked(os.path.join(self.__path, "repo.json"), "r", False):
                pkgMetaFile = os.path.join(path, "pkg.json")
                with OpenLocked(pkgMetaFile, "r+", True) as f:
                    if not os.path.isdir(path):
                        return None, None # concurrent gc wiped the package :(
                    meta = json.load(f)
                    sharedHash = bytes.fromhex(meta.get("hash", ""))
                    if len(sharedHash) != 20:
                        raise BuildError("Invalid shared result hash in " + path)

                    # Make sure our workspace is recorded in the metainfo.
                    users = meta.setdefault("users", [])
                    if workspace not in users:
                        users.append(workspace)
                        f.seek(0)
                        f.truncate()
                        json.dump(meta, f)
                    else:
                        # Touch the file so gc sees the package as recently used.
                        os.utime(pkgMetaFile)
        except FileNotFoundError:
            return None, None
        except OSError as e:
            raise BuildError("Could not read shared result meta info: " + str(e))
        except (json.JSONDecodeError, ValueError) as e:
            raise BuildError("Corrupt meta info in {}: {}".format(path, str(e)))

        return path, sharedHash

    def installSharedPackage(self, workspace, buildId, sharedHash, mayMove):
        """Install the result in *workspace* into the shared repository.

        Returns ``(sharedPath, installed)`` where *installed* is False if a
        concurrent build won the installation race.  May trigger automatic
        garbage collection when the quota is exceeded.
        """
        # Quick check: was somebody faster?
        sharedPath = self.__buildPath(buildId)
        if os.path.isdir(sharedPath):
            return sharedPath, False

        # Prepare everything in temporary directory next to the shared packages
        # to atomically "install" the whole package with a single move. Can
        # still lose the race at the final rename!
        repoSize = 0
        try:
            os.makedirs(os.path.dirname(sharedPath), exist_ok=True)
            with tempfile.TemporaryDirectory(dir=self.__path) as tmpDir:
                tmpSharedPath = os.path.join(tmpDir, "pkg")
                os.mkdir(tmpSharedPath)
                shutil.copyfile(os.path.join(workspace, "..", "audit.json.gz"),
                                os.path.join(tmpSharedPath, "audit.json.gz"))
                shutil.copyfile(os.path.join(workspace, "..", "cache.bin"),
                                os.path.join(tmpDir, "cache.bin"))
                if mayMove:
                    shutil.move(workspace, tmpSharedPath)
                else:
                    shutil.copytree(workspace, os.path.join(tmpSharedPath, "workspace"),
                                    symlinks=True)

                # Verify the result hash and count file system size. The user
                # could have an incompatible file system at the destination.
                # The storage size is used for garbage collection later...
                actualHash, actualSize = hashDirectoryWithSize(
                    os.path.join(tmpSharedPath, "workspace"),
                    os.path.join(tmpDir, "cache.bin"))
                if actualHash != sharedHash:
                    raise BuildError("The shared package hash changed at destination. Incompatible file system?")

                with open(os.path.join(tmpSharedPath, "pkg.json"), "w") as f:
                    json.dump({
                        "hash" : asHexStr(sharedHash),
                        "size" : actualSize,
                        "users" : [ os.path.abspath(workspace) ]
                    }, f)

                # Atomic install. Losing the race is not considered a problem.
                try:
                    os.rename(tmpSharedPath, sharedPath)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        return sharedPath, False
                    raise

                # Add to quota
                repoSize = self.__addPackage(buildId, actualSize)
        except OSError as e:
            raise BuildError("Error installing shared package: " + str(e))

        # Over quota? Try automatic garbage collection or warn the user.
        if (self.__quota is not None) and (repoSize > self.__quota):
            if self.__autoClean:
                repoSize = self.gc(False, False, newPkg=sharedPath)
                if repoSize > self.__quota: warnGcDidNotHelp.show(self.__path)
            else:
                warnRepoSize.show(self.__path)

        return sharedPath, True

    def gc(self, pruneUsed, pruneUnused, dryRun=False, progress=lambda x: None, newPkg=None):
        """Garbage collect the repository down to its quota.

        Oldest packages are removed first; used packages are only removed
        when *pruneUsed* is set, and unused ones are removed unconditionally
        when *pruneUnused* is set.  Returns the resulting repository size,
        or None when nothing had to be done.
        """
        if (self.__quota is None) and not pruneUnused:
            return None
        if not os.path.isdir(self.__path):
            return 0

        # Create a temporary attic directory. All garbage collected packages
        # are moved there to delete them without holding any locks.
        repoSize = 0
        with tempfile.TemporaryDirectory(dir=self.__path) as attic:
            # Get exclusive lock on repository. Prohibits further installations
            # and usage of packages.
            candidates = []
            with OpenLocked(os.path.join(self.__path, "repo.json"), "r+", True) as rf:
                repoMeta = json.load(rf)

                # Scan all packages
                for pkg, size in repoMeta.get("pkgs", {}).items():
                    repoSize += size
                    pkgPath = self.__buildPath(bytes.fromhex(pkg))
                    try:
                        with OpenLocked(os.path.join(pkgPath, "pkg.json"), "r", False) as pf:
                            pkgMeta = json.load(pf)
                            pkgTime = os.fstat(pf.fileno()).st_mtime_ns
                            # The just-installed package (newPkg) is never a candidate.
                            pkgUnused = checkUnused(pkgMeta, pkgPath) and (pkgPath != newPkg)
                            if pkgUnused or pruneUsed:
                                candidates.append((pkgUnused, pkgTime, size, pkg))
                    except FileNotFoundError:
                        pass

                # Move all candidates to the attic until we are under the quota.
                # Sorting puts used-before-unused... actually unused (True) sorts
                # after used (False), then oldest mtime first within each group.
                for pkgUnused, _, pkgSize, pkgBuildId in sorted(candidates):
                    if (not pkgUnused or not pruneUnused) and (repoSize <= self.__quota):
                        break
                    pkgPath = self.__buildPath(bytes.fromhex(pkgBuildId))
                    repoSize -= pkgSize
                    progress(pkgPath)
                    if not dryRun:
                        os.rename(pkgPath, os.path.join(attic, pkgBuildId))
                        del repoMeta["pkgs"][pkgBuildId]

                # Persist the updated index before releasing the lock.
                rf.seek(0)
                rf.truncate()
                json.dump(repoMeta, rf)

        return repoSize

    @property
    def quota(self):
        # Quota in bytes, or None when unlimited.
        return self.__quota
def getShare(spec):
    """Factory: a LocalShare when *spec* names a path, else a no-op NullShare."""
    if 'path' not in spec:
        return NullShare()
    return LocalShare(spec)
|
BobBuildTool/bob
|
pym/bob/cmds/build/share.py
|
Python
|
gpl-3.0
| 12,327
|
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import stevedore
from ironic.common import exception
from ironic.openstack.common import lockutils
# Configuration knob selecting the DHCP provider backend plugin.
dhcp_provider_opts = [
    cfg.StrOpt('dhcp_provider',
               default='neutron',
               help='DHCP provider to use. "neutron" uses Neutron, and '
                    '"none" uses a no-op provider.'
               ),
]

CONF = cfg.CONF
CONF.register_opts(dhcp_provider_opts, group='dhcp')

# NOTE(review): this module-level cache appears unused -- DHCPFactory keeps
# its own class-level _dhcp_provider below; confirm before removing.
_dhcp_provider = None

# Name of the lock serializing provider initialization across greenthreads.
EM_SEMAPHORE = 'dhcp_provider'
class DHCPFactory(object):
    """Factory exposing the configured DHCP provider driver.

    The driver is loaded once per process (guarded by a lock) and cached
    on the class; subsequent instantiations reuse it.
    """

    # NOTE(lucasagomes): Instantiate a stevedore.driver.DriverManager
    #                    only once, the first time DHCPFactory.__init__
    #                    is called.
    _dhcp_provider = None

    def __init__(self, **kwargs):
        if not DHCPFactory._dhcp_provider:
            DHCPFactory._set_dhcp_provider(**kwargs)

    # NOTE(lucasagomes): Use lockutils to avoid a potential race in eventlet
    #                    that might try to create two dhcp factories.
    @classmethod
    @lockutils.synchronized(EM_SEMAPHORE, 'ironic-')
    def _set_dhcp_provider(cls, **kwargs):
        """Initialize the dhcp provider

        :raises: DHCPNotFound if the dhcp_provider cannot be loaded.
        """
        # NOTE(lucasagomes): In case multiple greenthreads queue up on
        # this lock before _dhcp_provider is initialized,
        # prevent creation of multiple DriverManager.
        if cls._dhcp_provider:
            return

        dhcp_provider_name = CONF.dhcp.dhcp_provider
        try:
            # NOTE(review): relies on `import stevedore` exposing the
            # `driver` submodule -- confirm, or import stevedore.driver
            # explicitly.
            _extension_manager = stevedore.driver.DriverManager(
                'ironic.dhcp',
                dhcp_provider_name,
                invoke_kwds=kwargs,
                invoke_on_load=True)
        except RuntimeError:
            raise exception.DHCPNotFound(dhcp_provider_name=dhcp_provider_name)

        cls._dhcp_provider = _extension_manager.driver

    def update_dhcp(self, task, dhcp_opts):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param dhcp_opts: this will be a list of dicts, e.g.

            ::

             [{'opt_name': 'bootfile-name',
               'opt_value': 'pxelinux.0'},
              {'opt_name': 'server-ip-address',
               'opt_value': '123.123.123.456'},
              {'opt_name': 'tftp-server',
               'opt_value': '123.123.123.123'}]
        """
        self.provider.update_dhcp_opts(task, dhcp_opts)

    @property
    def provider(self):
        # The driver cached at class level by _set_dhcp_provider.
        return self._dhcp_provider
|
froyobin/ironic
|
ironic/common/dhcp_factory.py
|
Python
|
apache-2.0
| 3,294
|
from typing import Any
from unittest.mock import patch
import ujson
from django.http import HttpResponse
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.users import get_api_key
from zerver.models import get_realm, get_user
class ZephyrTest(ZulipTestCase):
    """End-to-end tests of the Webathena/Kerberos mirroring login endpoint."""

    def test_webathena_kerberos_login(self) -> None:
        user = self.example_user('hamlet')
        self.login_user(user)

        def post(subdomain: Any, **kwargs: Any) -> HttpResponse:
            # All POST parameters are JSON-encoded before submission.
            params = {k: ujson.dumps(v) for k, v in kwargs.items()}
            return self.client_post('/accounts/webathena_kerberos_login/', params,
                                    subdomain=subdomain)

        # Missing credential and non-zephyr realm are both rejected.
        result = post("zulip")
        self.assert_json_error(result, 'Could not find Kerberos credential')
        result = post("zulip", cred='whatever')
        self.assert_json_error(result, 'Webathena login not enabled')

        email = str(self.mit_email("starnine"))
        realm = get_realm('zephyr')
        user = get_user(email, realm)
        api_key = get_api_key(user)
        self.login_user(user)

        # Helpers patching the ccache construction and the ssh invocation.
        def ccache_mock(**kwargs: Any) -> Any:
            return patch('zerver.views.zephyr.make_ccache', **kwargs)

        def ssh_mock(**kwargs: Any) -> Any:
            return patch('zerver.views.zephyr.subprocess.check_call', **kwargs)

        def mirror_mock() -> Any:
            return self.settings(PERSONAL_ZMIRROR_SERVER='server')

        def logging_mock() -> Any:
            return patch('logging.exception')

        cred = dict(cname=dict(nameString=['starnine']))

        # Broken ccache construction -> client error.
        with ccache_mock(side_effect=KeyError('foo')):
            result = post("zephyr", cred=cred)
        self.assert_json_error(result, 'Invalid Kerberos cache')

        # ssh failure is logged and reported as a mirroring setup error.
        with \
                ccache_mock(return_value=b'1234'), \
                ssh_mock(side_effect=KeyError('foo')), \
                logging_mock() as log:
            result = post("zephyr", cred=cred)

        self.assert_json_error(result, 'We were unable to setup mirroring for you')
        log.assert_called_with("Error updating the user's ccache")

        # Success path: ccache is base64'd ('MTIzNA==' == b64 of b'1234')
        # and shipped to the mirror server via ssh.
        with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
            result = post("zephyr", cred=cred)

        self.assert_json_success(result)
        ssh.assert_called_with([
            'ssh',
            'server',
            '--',
            '/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache',
            'starnine',
            api_key,
            'MTIzNA=='])

        # Accounts whose Kerberos usernames are known not to match their
        # zephyr accounts are hardcoded, and should be handled properly.

        def kerberos_alter_egos_mock() -> Any:
            return patch(
                'zerver.views.zephyr.kerberos_alter_egos',
                {'kerberos_alter_ego': 'starnine'})

        cred = dict(cname=dict(nameString=['kerberos_alter_ego']))
        with \
                ccache_mock(return_value=b'1234'), \
                mirror_mock(), \
                ssh_mock() as ssh, \
                kerberos_alter_egos_mock():
            result = post("zephyr", cred=cred)

        self.assert_json_success(result)
        ssh.assert_called_with([
            'ssh',
            'server',
            '--',
            '/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache',
            'starnine',
            api_key,
            'MTIzNA=='])
|
shubhamdhama/zulip
|
zerver/tests/test_zephyr.py
|
Python
|
apache-2.0
| 3,450
|
#
# crowbar - a geometry manipulation program
# Copyright (C) 2020 Dylan Scott Grafmyre
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
crowbar - a geometry manipulation program
Copyright (C) 2020 Dylan Scott Grafmyre
"""
import crowbar.baseextension as c_be
class NewObjects(c_be.CoreExtension):
    """Core extension providing the 'new objects' UI panel."""

    # Resource prefix under which this extension's packaged data files live.
    pkg_resources_prefix = 'ext/new_objects'


# Module-level singleton; the host application imports these attributes.
INSTANCE = NewObjects()
# Bound method exposed at module level as the extension's widget factory
# (presumably inherited from CoreExtension -- confirm against base class).
get_main_gtk_widget = INSTANCE.main_gtk_widget
|
thorsummoner/crowbar
|
crowbar/ext/new_objects/__init__.py
|
Python
|
unlicense
| 1,078
|
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.hardware.core.core_device import CoreDevice
from pychron.hardware.gauges.base_controller import BaseGaugeController
class QtegraGaugeController(BaseGaugeController, CoreDevice):
    """Gauge controller that reads pressures from a Qtegra system."""

    def load_additional_args(self, config, *args, **kw):
        """Load extra configuration: the display name and the gauge list.

        Always returns True to signal successful configuration.
        """
        self.display_name = self.config_get(config, 'General', 'display_name', default=self.name)
        self._load_gauges(config)
        return True

    def _read_pressure(self, name=None, verbose=False):
        """Query the pressure for gauge *name*.

        :param name: gauge parameter name; when None, no query is made and
            the sentinel string 'err' is returned.
        :param verbose: forwarded to the underlying ``ask`` call.
        """
        pressure = 'err'
        if name is not None:
            # Bug fix: honor the caller's *verbose* flag instead of
            # unconditionally forcing verbose=True.
            pressure = self.ask('GetParameter {}'.format(name), verbose=verbose)
        return pressure
# ============= EOF =============================================
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/hardware/gauges/qtegra/qtegra_gauge_controller.py
|
Python
|
apache-2.0
| 1,410
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Health Check module for Apache service."""
import socket
import urllib2
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils
class ApacheCheck(base.BaseCheck):
    """apache server health check class.

    NOTE: this module is Python 2 code (print statements, urllib2).
    """
    NAME = "Apache Check"

    def run(self):
        """do the healthcheck.

        Returns (code, messages); code stays 1 when everything passed.
        """
        # Service name differs between RedHat-family and Debian-family.
        if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
            apache_service = 'httpd'
        else:
            apache_service = 'apache2'
        self.check_apache_conf(apache_service)
        print "[Done]"
        self.check_apache_running(apache_service)
        print "[Done]"
        if self.code == 1:
            self.messages.append(
                "[%s]Info: Apache health check has completed. "
                "No problems found, all systems go." % self.NAME)
        return (self.code, self.messages)

    def check_apache_conf(self, apache_service):
        """Validates if Apache settings.

        :param apache_service : service type of apache, os dependent.
                                e.g. httpd or apache2
        :type apache_service : string
        """
        print "Checking Apache Config......",
        # Both the vhost config and the WSGI entry point must exist.
        conf_err_msg = health_check_utils.check_path(
            self.NAME,
            "/etc/%s/conf.d/ods-server.conf" % apache_service)
        if not conf_err_msg == "":
            self._set_status(0, conf_err_msg)

        wsgi_err_msg = health_check_utils.check_path(
            self.NAME,
            '/var/www/compass/compass.wsgi')
        if not wsgi_err_msg == "":
            self._set_status(0, wsgi_err_msg)

        return True

    def check_apache_running(self, apache_service):
        """Checks if Apache service is running on port 80."""
        print "Checking Apache service......",
        serv_err_msg = health_check_utils.check_service_running(self.NAME,
                                                                apache_service)
        if not serv_err_msg == "":
            self._set_status(0, serv_err_msg)
        if 'http' != socket.getservbyport(80):
            self._set_status(
                0,
                "[%s]Error: Apache is not listening on port 80."
                % self.NAME)
        try:
            # NOTE(review): the response object is never closed -- consider
            # closing it; harmless for a one-shot check.
            html = urllib2.urlopen('http://localhost')
            html.geturl()
        except Exception:
            self._set_status(
                0,
                "[%s]Error: Apache is not listening on port 80."
                % self.NAME)

        return True
|
baigk/compass-core
|
compass/actions/health_check/check_apache.py
|
Python
|
apache-2.0
| 3,123
|
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from .compat import login_required
from .models import NoticeType, NOTICE_MEDIA
from .utils import notice_setting_for_user
class NoticeSettingsView(TemplateView):
template_name = "pinax/notifications/notice_settings.html"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(NoticeSettingsView, self).dispatch(*args, **kwargs)
@property
def scoping(self):
return None
def setting_for_user(self, notice_type, medium_id):
return notice_setting_for_user(
self.request.user,
notice_type,
medium_id,
scoping=self.scoping
)
def form_label(self, notice_type, medium_id):
return "setting-{0}-{1}".format(
notice_type.pk,
medium_id
)
def process_cell(self, label):
val = self.request.POST.get(label)
_, pk, medium_id = label.split("-")
notice_type = NoticeType.objects.get(pk=pk)
setting = self.setting_for_user(notice_type, medium_id)
if val == "on":
setting.send = True
else:
setting.send = False
setting.save()
def settings_table(self):
notice_types = NoticeType.objects.all()
table = []
for notice_type in notice_types:
row = []
for medium_id, medium_display in NOTICE_MEDIA:
setting = self.setting_for_user(notice_type, medium_id)
row.append((
self.form_label(notice_type, medium_id),
setting.send)
)
table.append({"notice_type": notice_type, "cells": row})
return table
def post(self, request, *args, **kwargs):
table = self.settings_table()
for row in table:
for cell in row["cells"]:
self.process_cell(cell[0])
return HttpResponseRedirect(request.POST.get("next_page", "."))
def get_context_data(self, **kwargs):
settings = {
"column_headers": [
medium_display
for _, medium_display in NOTICE_MEDIA
],
"rows": self.settings_table(),
}
context = super(NoticeSettingsView, self).get_context_data(**kwargs)
context.update({
"notice_types": NoticeType.objects.all(),
"notice_settings": settings
})
return context
|
synasius/pinax-notifications
|
pinax/notifications/views.py
|
Python
|
mit
| 2,586
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import numpy as np
###############################################################################
# Utility functions
def _check_color_dim(val):
"""Ensure val is Nx(n_col), usually Nx3"""
val = np.atleast_2d(val)
if val.shape[1] not in (3, 4):
raise RuntimeError('Value must have second dimension of size 3 or 4')
return val, val.shape[1]
###############################################################################
# RGB<->HEX conversion
def _hex_to_rgba(hexs):
"""Convert hex to rgba, permitting alpha values in hex"""
hexs = np.atleast_1d(np.array(hexs, '|U9'))
out = np.ones((len(hexs), 4), np.float32)
for hi, h in enumerate(hexs):
assert isinstance(h, str)
off = 1 if h[0] == '#' else 0
assert len(h) in (6+off, 8+off)
e = (len(h)-off) // 2
out[hi, :e] = [int(h[i:i+2], 16) / 255.
for i in range(off, len(h), 2)]
return out
def _rgb_to_hex(rgbs):
"""Convert rgb to hex triplet"""
rgbs, n_dim = _check_color_dim(rgbs)
return np.array(['#%02x%02x%02x' % tuple((255*rgb[:3]).astype(np.uint8))
for rgb in rgbs], '|U7')
###############################################################################
# RGB<->HSV conversion
def _rgb_to_hsv(rgbs):
"""Convert Nx3 or Nx4 rgb to hsv"""
rgbs, n_dim = _check_color_dim(rgbs)
hsvs = list()
for rgb in rgbs:
rgb = rgb[:3] # don't use alpha here
idx = np.argmax(rgb)
val = rgb[idx]
c = val - np.min(rgb)
if c == 0:
hue = 0
sat = 0
else:
if idx == 0: # R == max
hue = ((rgb[1] - rgb[2]) / c) % 6
elif idx == 1: # G == max
hue = (rgb[2] - rgb[0]) / c + 2
else: # B == max
hue = (rgb[0] - rgb[1]) / c + 4
hue *= 60
sat = c / val
hsv = [hue, sat, val]
hsvs.append(hsv)
hsvs = np.array(hsvs, dtype=np.float32)
if n_dim == 4:
hsvs = np.concatenate((hsvs, rgbs[:, 3]), axis=1)
return hsvs
def _hsv_to_rgb(hsvs):
"""Convert Nx3 or Nx4 hsv to rgb"""
hsvs, n_dim = _check_color_dim(hsvs)
# In principle, we *might* be able to vectorize this, but might as well
# wait until a compelling use case appears
rgbs = list()
for hsv in hsvs:
c = hsv[1] * hsv[2]
m = hsv[2] - c
hp = hsv[0] / 60
x = c * (1 - abs(hp % 2 - 1))
if 0 <= hp < 1:
r, g, b = c, x, 0
elif hp < 2:
r, g, b = x, c, 0
elif hp < 3:
r, g, b = 0, c, x
elif hp < 4:
r, g, b = 0, x, c
elif hp < 5:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
rgb = [r + m, g + m, b + m]
rgbs.append(rgb)
rgbs = np.array(rgbs, dtype=np.float32)
if n_dim == 4:
rgbs = np.concatenate((rgbs, hsvs[:, 3]), axis=1)
return rgbs
###############################################################################
# RGB<->CIELab conversion
# These numbers are adapted from MIT-licensed MATLAB code for
# Lab<->RGB conversion. They provide an XYZ<->RGB conversion matrices,
# w/D65 white point normalization built in.
#_rgb2xyz = np.array([[0.412453, 0.357580, 0.180423],
# [0.212671, 0.715160, 0.072169],
# [0.019334, 0.119193, 0.950227]])
#_white_norm = np.array([0.950456, 1.0, 1.088754])
#_rgb2xyz /= _white_norm[:, np.newaxis]
#_rgb2xyz_norm = _rgb2xyz.T
_rgb2xyz_norm = np.array([[0.43395276, 0.212671, 0.01775791],
[0.37621941, 0.71516, 0.10947652],
[0.18982783, 0.072169, 0.87276557]])
#_xyz2rgb = np.array([[3.240479, -1.537150, -0.498535],
# [-0.969256, 1.875992, 0.041556],
# [0.055648, -0.204043, 1.057311]])
#_white_norm = np.array([0.950456, 1., 1.088754])
#_xyz2rgb *= _white_norm[np.newaxis, :]
_xyz2rgb_norm = np.array([[3.07993271, -1.53715, -0.54278198],
[-0.92123518, 1.875992, 0.04524426],
[0.05289098, -0.204043, 1.15115158]])
def _rgb_to_lab(rgbs):
rgbs, n_dim = _check_color_dim(rgbs)
# convert RGB->XYZ
xyz = rgbs[:, :3].copy() # a misnomer for now but will end up being XYZ
over = xyz > 0.04045
xyz[over] = ((xyz[over] + 0.055) / 1.055) ** 2.4
xyz[~over] /= 12.92
xyz = np.dot(xyz, _rgb2xyz_norm)
over = xyz > 0.008856
xyz[over] = xyz[over] ** (1. / 3.)
xyz[~over] = 7.787 * xyz[~over] + 0.13793103448275862
# Convert XYZ->LAB
L = (116. * xyz[:, 1]) - 16
a = 500 * (xyz[:, 0] - xyz[:, 1])
b = 200 * (xyz[:, 1] - xyz[:, 2])
labs = [L, a, b]
# Append alpha if necessary
if n_dim == 4:
labs.append(np.atleast1d(rgbs[:, 3]))
labs = np.array(labs, order='F').T # Becomes 'C' order b/c of .T
return labs
def _lab_to_rgb(labs):
"""Convert Nx3 or Nx4 lab to rgb"""
# adapted from BSD-licensed work in MATLAB by Mark Ruzon
# Based on ITU-R Recommendation BT.709 using the D65
labs, n_dim = _check_color_dim(labs)
# Convert Lab->XYZ (silly indexing used to preserve dimensionality)
y = (labs[:, 0] + 16.) / 116.
x = (labs[:, 1] / 500.) + y
z = y - (labs[:, 2] / 200.)
xyz = np.concatenate(([x], [y], [z])) # 3xN
over = xyz > 0.2068966
xyz[over] = xyz[over] ** 3.
xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
# Convert XYZ->LAB
rgbs = np.dot(_xyz2rgb_norm, xyz).T
over = rgbs > 0.0031308
rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
rgbs[~over] *= 12.92
if n_dim == 4:
rgbs = np.concatenate((rgbs, labs[:, 3]), axis=1)
rgbs = np.clip(rgbs, 0., 1.)
return rgbs
|
Eric89GXL/vispy
|
vispy/color/color_space.py
|
Python
|
bsd-3-clause
| 6,042
|
"""
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
from pandas.core.common import _asarray_tuplesafe, _count_not_none
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Paramters
---------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series, concat
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicity set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
....: index = [pd.Timestamp('20130101 09:00:00'),
....: pd.Timestamp('20130101 09:00:02'),
....: pd.Timestamp('20130101 09:00:03'),
....: pd.Timestamp('20130101 09:00:05'),
....: pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type, return the window
we have already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return _asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs['apply'] = dedent(r"""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
    _shared_docs['quantile'] = dedent("""
    %(name)s quantile
    Parameters
    ----------
    quantile : float
        0 <= quantile <= 1""")
    def quantile(self, quantile, **kwargs):
        """Windowed quantile.

        The extreme quantiles dispatch to the cheaper max/min kernels;
        everything else uses the general ``roll_quantile`` kernel.
        """
        window = self._get_window()
        index, indexi = self._get_index()
        def f(arg, *args, **kwargs):
            minp = _use_window(self.min_periods, window)
            # fast paths: q == 1 is the window max, q == 0 the window min
            if quantile == 1.0:
                return _window.roll_max(arg, window, minp, indexi,
                                        self.closed)
            elif quantile == 0.0:
                return _window.roll_min(arg, window, minp, indexi,
                                        self.closed)
            else:
                return _window.roll_quantile(arg, window, minp, indexi,
                                             self.closed, quantile)
        return self._apply(f, 'quantile', quantile=quantile,
                           **kwargs)
    _shared_docs['cov'] = dedent("""
    %(name)s sample covariance
    Parameters
    ----------
    other : Series, DataFrame, or ndarray, optional
        if not supplied then will default to self and produce pairwise output
    pairwise : bool, default None
        If False then only matching columns between self and other will be used
        and the output will be a DataFrame.
        If True then all pairwise combinations will be calculated and the
        output will be a MultiIndexed DataFrame in the case of DataFrame
        inputs. In the case of missing elements, only complete pairwise
        observations will be used.
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.""")
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        """Windowed sample covariance, computed as E[XY] - E[X]E[Y]
        with a ``count / (count - ddof)`` bias adjustment.
        """
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        # GH 16058: offset window
        if self.is_freq_type:
            window = self.win_freq
        else:
            window = self._get_window(other)
        def _get_cov(X, Y):
            # GH #12373 : rolling functions error on float32 data
            # to avoid potential overflow, cast the data to float64
            X = X.astype('float64')
            Y = Y.astype('float64')
            mean = lambda x: x.rolling(window, self.min_periods,
                                       center=self.center).mean(**kwargs)
            # X + Y is NaN wherever either input is missing, so this counts
            # only the complete pairwise observations in each window
            count = (X + Y).rolling(window=window,
                                    center=self.center).count(**kwargs)
            bias_adj = count / (count - ddof)
            return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_cov, pairwise=bool(pairwise))
    _shared_docs['corr'] = dedent("""
    %(name)s sample correlation
    Parameters
    ----------
    other : Series, DataFrame, or ndarray, optional
        if not supplied then will default to self and produce pairwise output
    pairwise : bool, default None
        If False then only matching columns between self and other will be
        used and the output will be a DataFrame.
        If True then all pairwise combinations will be calculated and the
        output will be a MultiIndex DataFrame in the case of DataFrame inputs.
        In the case of missing elements, only complete pairwise observations
        will be used.""")
    def corr(self, other=None, pairwise=None, **kwargs):
        """Windowed sample correlation: cov(a, b) / (std(a) * std(b))."""
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        window = self._get_window(other)
        def _get_corr(a, b):
            # rebuild rolling objects over the combined window so cov and
            # std are computed with consistent parameters
            a = a.rolling(window=window, min_periods=self.min_periods,
                          freq=self.freq, center=self.center)
            b = b.rolling(window=window, min_periods=self.min_periods,
                          freq=self.freq, center=self.center)
            return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
    """Provides rolling window calculations.

    Concrete subclass used for fixed-size and frequency (offset) based
    rolling windows; the public methods delegate to
    ``_Rolling_and_Expanding`` after keyword validation.
    """
    @cache_readonly
    def is_datetimelike(self):
        # True when the axis we roll over is datetime/timedelta/period-like,
        # which enables offset ('2s', Timedelta, ...) windows
        return isinstance(self._on,
                          (ABCDatetimeIndex,
                           ABCTimedeltaIndex,
                           ABCPeriodIndex))
    @cache_readonly
    def _on(self):
        # the axis rolled over: the index by default, or a named DataFrame
        # column when ``on`` was supplied
        if self.on is None:
            return self.obj.index
        elif (isinstance(self.obj, ABCDataFrame) and
              self.on in self.obj.columns):
            from pandas import Index
            return Index(self.obj[self.on])
        else:
            raise ValueError("invalid on specified as {0}, "
                             "must be a column (if DataFrame) "
                             "or None".format(self.on))
    def validate(self):
        """Validate window/min_periods/closed; convert offset windows to nanos."""
        super(Rolling, self).validate()
        # we allow rolling on a datetimelike index
        if ((self.obj.empty or self.is_datetimelike) and
                isinstance(self.window, (compat.string_types, ABCDateOffset,
                                         timedelta))):
            self._validate_monotonic()
            freq = self._validate_freq()
            # we don't allow center
            if self.center:
                raise NotImplementedError("center is not implemented "
                                          "for datetimelike and offset "
                                          "based windows")
            # this will raise ValueError on non-fixed freqs
            self.win_freq = self.window
            self.window = freq.nanos
            self.win_type = 'freq'
            # min_periods must be an integer
            if self.min_periods is None:
                self.min_periods = 1
        elif not is_integer(self.window):
            raise ValueError("window must be an integer")
        elif self.window < 0:
            raise ValueError("window must be non-negative")
        if not self.is_datetimelike and self.closed is not None:
            raise ValueError("closed only implemented for datetimelike "
                             "and offset based windows")
    def _validate_monotonic(self):
        """ validate on is monotonic """
        if not self._on.is_monotonic:
            formatted = self.on or 'index'
            raise ValueError("{0} must be "
                             "monotonic".format(formatted))
    def _validate_freq(self):
        """ validate & return our freq """
        from pandas.tseries.frequencies import to_offset
        try:
            return to_offset(self.window)
        except (TypeError, ValueError):
            raise ValueError("passed window {0} is not "
                             "compatible with a datetimelike "
                             "index".format(self.window))
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> df.rolling(3).sum()
              A         B         C
    0       NaN       NaN       NaN
    1       NaN       NaN       NaN
    2 -2.655105  0.637799 -2.135068
    3 -0.971785 -0.600366 -3.280224
    4 -0.214334 -1.294599 -3.227500
    5  1.514216  2.028250 -2.989060
    6  1.074618  5.709767 -2.322600
    7  2.718061  3.850718  0.256446
    8 -0.289082  2.454418  1.416871
    9  0.212668  0.403198 -0.093924
    >>> df.rolling(3).agg({'A':'sum', 'B':'min'})
              A         B
    0       NaN       NaN
    1       NaN       NaN
    2 -2.655105 -0.165272
    3 -0.971785 -1.340923
    4 -0.214334 -1.340923
    5  1.514216 -1.340923
    6  1.074618  0.211596
    7  2.718061 -1.647453
    8 -0.289082 -1.647453
    9  0.212668 -1.647453
    See also
    --------
    pandas.Series.rolling
    pandas.DataFrame.rolling
    """)
    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(Rolling, self).aggregate(arg, *args, **kwargs)
    agg = aggregate
    # The methods below are thin delegations to _Rolling_and_Expanding; the
    # decorators substitute 'rolling' into the shared doc fragments.
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['count'])
    def count(self):
        # different impl for freq counting
        if self.is_freq_type:
            return self._apply('roll_count', 'count')
        return super(Rolling, self).count()
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, args=(), kwargs={}):
        return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_rolling_func('sum', args, kwargs)
        return super(Rolling, self).sum(*args, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_rolling_func('max', args, kwargs)
        return super(Rolling, self).max(*args, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_rolling_func('min', args, kwargs)
        return super(Rolling, self).min(*args, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_rolling_func('mean', args, kwargs)
        return super(Rolling, self).mean(*args, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Rolling, self).median(**kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('std', args, kwargs)
        return super(Rolling, self).std(ddof=ddof, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_rolling_func('var', args, kwargs)
        return super(Rolling, self).var(ddof=ddof, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Rolling, self).skew(**kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Rolling, self).kurt(**kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, **kwargs):
        return super(Rolling, self).quantile(quantile=quantile, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Rolling, self).cov(other=other, pairwise=pairwise,
                                        ddof=ddof, **kwargs)
    @Substitution(name='rolling')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Rolling, self).corr(other=other, pairwise=pairwise,
                                         **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
    """
    Provides a rolling groupby implementation
    .. versionadded:: 0.18.1
    """
    @property
    def _constructor(self):
        # per-group objects are plain Rolling instances
        return Rolling
    def _gotitem(self, key, ndim, subset=None):
        # we are setting the index on the actual object
        # here so our index is carried thru to the selected obj
        # when we do the splitting for the groupby
        if self.on is not None:
            self._groupby.obj = self._groupby.obj.set_index(self._on)
            self.on = None
        return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
    def _validate_monotonic(self):
        """
        validate that on is monotonic;
        we don't care for groupby.rolling
        because we have already validated at a higher
        level
        """
        pass
class Expanding(_Rolling_and_Expanding):
    """
    Provides expanding transformations.
    .. versionadded:: 0.18.0
    Parameters
    ----------
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        .. deprecated:: 0.18.0
           Frequency to conform the data to before computing the statistic.
           Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Set the labels at the center of the window.
    axis : int or string, default 0
    Returns
    -------
    a Window sub-classed for the particular operation
    Examples
    --------
    >>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0
    >>> df.expanding(2).sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  3.0
    4  7.0
    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    _attributes = ['min_periods', 'freq', 'center', 'axis']
    def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
                 **kwargs):
        # extra kwargs are accepted (and ignored) for API compatibility
        super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
                                        freq=freq, center=center, axis=axis)
    @property
    def _constructor(self):
        return Expanding
    def _get_window(self, other=None):
        """Return the effective window length for an expanding operation.

        For a unary operation this is the full length of the selected
        object; for a binary operation (e.g. cov/corr) it is the combined
        length of both operands, in either case floored by ``min_periods``.

        Parameters
        ----------
        other : Series, DataFrame, or ndarray, optional
            The second operand of a binary moment, if any.

        Returns
        -------
        int
        """
        obj = self._selected_obj
        if other is None:
            return (max(len(obj), self.min_periods) if self.min_periods
                    else len(obj))
        # BUG FIX: the combined length must include ``other``; previously
        # this read ``len(obj) + len(obj)``, silently ignoring ``other``.
        return (max((len(obj) + len(other)), self.min_periods)
                if self.min_periods else (len(obj) + len(other)))
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> df.ewm(alpha=0.5).mean()
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.464856  0.569633 -0.490089
    2 -0.207700  0.149687 -1.135379
    3 -0.471677 -0.645305 -0.906555
    4 -0.355635 -0.203033 -0.904111
    5  1.076417  1.503943 -1.146293
    6 -0.041654  1.925562 -0.588728
    7  0.680292  0.132049  0.548693
    8  0.067236  0.948257  0.163353
    9 -0.286980  0.618493 -0.694496
    See also
    --------
    pandas.DataFrame.expanding.aggregate
    pandas.DataFrame.rolling.aggregate
    pandas.DataFrame.aggregate
    """)
    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(Expanding, self).aggregate(arg, *args, **kwargs)
    agg = aggregate
    # The methods below are thin delegations to _Rolling_and_Expanding; the
    # decorators substitute 'expanding' into the shared doc fragments.
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['count'])
    def count(self, **kwargs):
        return super(Expanding, self).count(**kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['apply'])
    def apply(self, func, args=(), kwargs={}):
        return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['sum'])
    def sum(self, *args, **kwargs):
        nv.validate_expanding_func('sum', args, kwargs)
        return super(Expanding, self).sum(*args, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['max'])
    def max(self, *args, **kwargs):
        nv.validate_expanding_func('max', args, kwargs)
        return super(Expanding, self).max(*args, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['min'])
    def min(self, *args, **kwargs):
        nv.validate_expanding_func('min', args, kwargs)
        return super(Expanding, self).min(*args, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['mean'])
    def mean(self, *args, **kwargs):
        nv.validate_expanding_func('mean', args, kwargs)
        return super(Expanding, self).mean(*args, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['median'])
    def median(self, **kwargs):
        return super(Expanding, self).median(**kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['std'])
    def std(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('std', args, kwargs)
        return super(Expanding, self).std(ddof=ddof, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['var'])
    def var(self, ddof=1, *args, **kwargs):
        nv.validate_expanding_func('var', args, kwargs)
        return super(Expanding, self).var(ddof=ddof, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['skew'])
    def skew(self, **kwargs):
        return super(Expanding, self).skew(**kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['kurt'])
    def kurt(self, **kwargs):
        return super(Expanding, self).kurt(**kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['quantile'])
    def quantile(self, quantile, **kwargs):
        return super(Expanding, self).quantile(quantile=quantile, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['cov'])
    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
        return super(Expanding, self).cov(other=other, pairwise=pairwise,
                                          ddof=ddof, **kwargs)
    @Substitution(name='expanding')
    @Appender(_doc_template)
    @Appender(_shared_docs['corr'])
    def corr(self, other=None, pairwise=None, **kwargs):
        return super(Expanding, self).corr(other=other, pairwise=pairwise,
                                           **kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
    """
    Provides an expanding groupby implementation
    .. versionadded:: 0.18.1
    """
    @property
    def _constructor(self):
        # per-group objects are plain Expanding instances
        return Expanding
# Doc fragments appended (via @Appender) to the EWM bias-aware and
# pairwise methods below.
_bias_template = """
Parameters
----------
bias : boolean, default False
    Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
    if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
    If False then only matching columns between self and other will be used and
    the output will be a DataFrame.
    If True then all pairwise combinations will be calculated and the output
    will be a MultiIndex DataFrame in the case of DataFrame inputs.
    In the case of missing elements, only complete pairwise observations will
    be used.
bias : boolean, default False
    Use a standard estimation bias correction
"""
class EWM(_Rolling):
    r"""
    Provides exponential weighted functions
    .. versionadded:: 0.18.0
    Parameters
    ----------
    com : float, optional
        Specify decay in terms of center of mass,
        :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
    span : float, optional
        Specify decay in terms of span,
        :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
    halflife : float, optional
        Specify decay in terms of half-life,
        :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
    alpha : float, optional
        Specify smoothing factor :math:`\alpha` directly,
        :math:`0 < \alpha \leq 1`
        .. versionadded:: 0.18.0
    min_periods : int, default 0
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : None or string alias / date offset object, default=None
        .. deprecated:: 0.18.0
            Frequency to conform to before computing statistic
    adjust : boolean, default True
        Divide by decaying adjustment factor in beginning periods to account
        for imbalance in relative weightings (viewing EWMA as a moving average)
    ignore_na : boolean, default False
        Ignore missing values when calculating weights;
        specify True to reproduce pre-0.15.0 behavior
    Returns
    -------
    a Window sub-classed for the particular operation
    Examples
    --------
    >>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0
    >>> df.ewm(com=0.5).mean()
              B
    0  0.000000
    1  0.750000
    2  1.615385
    3  1.615385
    4  3.670213
    Notes
    -----
    Exactly one of center of mass, span, half-life, and alpha must be provided.
    Allowed values and relationship between the parameters are specified in the
    parameter descriptions above; see the link at the end of this section for
    a detailed explanation.
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    When adjust is True (default), weighted averages are calculated using
    weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
    When adjust is False, weighted averages are calculated recursively as:
        weighted_average[0] = arg[0];
        weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
    When ignore_na is False (default), weights are based on absolute positions.
    For example, the weights of x and y used in calculating the final weighted
    average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
    (1-alpha)**2 and alpha (if adjust is False).
    When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
    on relative positions. For example, the weights of x and y used in
    calculating the final weighted average of [x, None, y] are 1-alpha and 1
    (if adjust is True), and 1-alpha and alpha (if adjust is False).
    More details can be found at
    http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
    """
    _attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
    def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
                 min_periods=0, freq=None, adjust=True, ignore_na=False,
                 axis=0):
        self.obj = obj
        # span/halflife/alpha are all normalized to a single center of mass
        self.com = _get_center_of_mass(com, span, halflife, alpha)
        self.min_periods = min_periods
        self.freq = freq
        self.adjust = adjust
        self.ignore_na = ignore_na
        self.axis = axis
        self.on = None
    @property
    def _constructor(self):
        return EWM
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    >>> df
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.004295  0.905829 -0.954544
    2  0.735167 -0.165272 -1.619346
    3 -0.702657 -1.340923 -0.706334
    4 -0.246845  0.211596 -0.901819
    5  2.463718  3.157577 -1.380906
    6 -1.142255  2.340594 -0.039875
    7  1.396598 -1.647453  1.677227
    8 -0.543425  1.761277 -0.220481
    9 -0.640505  0.289374 -1.550670
    >>> df.ewm(alpha=0.5).mean()
              A         B         C
    0 -2.385977 -0.102758  0.438822
    1 -1.464856  0.569633 -0.490089
    2 -0.207700  0.149687 -1.135379
    3 -0.471677 -0.645305 -0.906555
    4 -0.355635 -0.203033 -0.904111
    5  1.076417  1.503943 -1.146293
    6 -0.041654  1.925562 -0.588728
    7  0.680292  0.132049  0.548693
    8  0.067236  0.948257  0.163353
    9 -0.286980  0.618493 -0.694496
    See also
    --------
    pandas.DataFrame.rolling.aggregate
    """)
    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        versionadded='',
        klass='Series/DataFrame'))
    def aggregate(self, arg, *args, **kwargs):
        return super(EWM, self).aggregate(arg, *args, **kwargs)
    agg = aggregate
    def _apply(self, func, how=None, **kwargs):
        """Rolling statistical measure using supplied function. Designed to be
        used with passed-in Cython array-based functions.
        Parameters
        ----------
        func : string/callable to apply
        how : string, default to None
            .. deprecated:: 0.18.0
               how to resample
        Returns
        -------
        y : type of input argument
        """
        blocks, obj, index = self._create_blocks(how=how)
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                # non-numeric block: pass it through unchanged
                results.append(b.values.copy())
                continue
            if values.size == 0:
                results.append(values.copy())
                continue
            # if we have a string function name, wrap it
            if isinstance(func, compat.string_types):
                cfunc = getattr(_window, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in _window.{0}".format(func))
                # NOTE: rebinding ``func`` here deliberately shadows the
                # string argument with a closure over the Cython kernel
                def func(arg):
                    return cfunc(arg, self.com, int(self.adjust),
                                 int(self.ignore_na), int(self.min_periods))
            results.append(np.apply_along_axis(func, self.axis, values))
        return self._wrap_results(results, blocks, obj)
    @Substitution(name='ewm')
    @Appender(_doc_template)
    def mean(self, *args, **kwargs):
        """exponential weighted moving average"""
        nv.validate_window_func('mean', args, kwargs)
        return self._apply('ewma', **kwargs)
    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_bias_template)
    def std(self, bias=False, *args, **kwargs):
        """exponential weighted moving stddev"""
        nv.validate_window_func('std', args, kwargs)
        return _zsqrt(self.var(bias=bias, **kwargs))
    vol = std
    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_bias_template)
    def var(self, bias=False, *args, **kwargs):
        """exponential weighted moving variance"""
        nv.validate_window_func('var', args, kwargs)
        def f(arg):
            # variance is the covariance of a series with itself
            return _window.ewmcov(arg, arg, self.com, int(self.adjust),
                                  int(self.ignore_na), int(self.min_periods),
                                  int(bias))
        return self._apply(f, **kwargs)
    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_pairwise_template)
    def cov(self, other=None, pairwise=None, bias=False, **kwargs):
        """exponential weighted sample covariance"""
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        def _get_cov(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)
            cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
                                 int(self.adjust), int(self.ignore_na),
                                 int(self.min_periods), int(bias))
            return X._wrap_result(cov)
        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_cov, pairwise=bool(pairwise))
    @Substitution(name='ewm')
    @Appender(_doc_template)
    @Appender(_pairwise_template)
    def corr(self, other=None, pairwise=None, **kwargs):
        """exponential weighted sample correlation"""
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)
        def _get_corr(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)
            def _cov(x, y):
                # bias=1: biased covariances cancel in the corr ratio
                return _window.ewmcov(x, y, self.com, int(self.adjust),
                                      int(self.ignore_na),
                                      int(self.min_periods),
                                      1)
            x_values = X._prep_values()
            y_values = Y._prep_values()
            with np.errstate(all='ignore'):
                cov = _cov(x_values, y_values)
                x_var = _cov(x_values, x_values)
                y_var = _cov(y_values, y_values)
                corr = cov / _zsqrt(x_var * y_var)
            return X._wrap_result(corr)
        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
    """Apply the binary moment function ``f`` across two pandas/ndarray args.

    Handles the Series/Series, DataFrame/Series and DataFrame/DataFrame
    combinations, including the ``pairwise`` (MultiIndexed) output for
    DataFrame inputs.
    """
    if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
            isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
        raise TypeError("arguments to moment function must be of type "
                        "np.ndarray/Series/DataFrame")
    if (isinstance(arg1, (np.ndarray, ABCSeries)) and
            isinstance(arg2, (np.ndarray, ABCSeries))):
        X, Y = _prep_binary(arg1, arg2)
        return f(X, Y)
    elif isinstance(arg1, ABCDataFrame):
        from pandas import DataFrame
        def dataframe_from_int_dict(data, frame_template):
            # data is keyed by integer column position; map back to labels
            result = DataFrame(data, index=frame_template.index)
            if len(result.columns) > 0:
                result.columns = frame_template.columns[result.columns]
            return result
        results = {}
        if isinstance(arg2, ABCDataFrame):
            if pairwise is False:
                if arg1 is arg2:
                    # special case in order to handle duplicate column names
                    for i, col in enumerate(arg1.columns):
                        results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
                    return dataframe_from_int_dict(results, arg1)
                else:
                    if not arg1.columns.is_unique:
                        raise ValueError("'arg1' columns are not unique")
                    if not arg2.columns.is_unique:
                        raise ValueError("'arg2' columns are not unique")
                    with warnings.catch_warnings(record=True):
                        X, Y = arg1.align(arg2, join='outer')
                    # mask each side with the other's NaNs / index union
                    X = X + 0 * Y
                    Y = Y + 0 * X
                    with warnings.catch_warnings(record=True):
                        res_columns = arg1.columns.union(arg2.columns)
                    for col in res_columns:
                        if col in X and col in Y:
                            results[col] = f(X[col], Y[col])
                    return DataFrame(results, index=X.index,
                                     columns=res_columns)
            elif pairwise is True:
                results = defaultdict(dict)
                for i, k1 in enumerate(arg1.columns):
                    for j, k2 in enumerate(arg2.columns):
                        if j < i and arg2 is arg1:
                            # Symmetric case
                            results[i][j] = results[j][i]
                        else:
                            results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
                                                            arg2.iloc[:, j]))
                # TODO: not the most efficient (perf-wise)
                # though not bad code-wise
                from pandas import Panel, MultiIndex, concat
                with warnings.catch_warnings(record=True):
                    p = Panel.from_dict(results).swapaxes('items', 'major')
                    if len(p.major_axis) > 0:
                        p.major_axis = arg1.columns[p.major_axis]
                    if len(p.minor_axis) > 0:
                        p.minor_axis = arg2.columns[p.minor_axis]
                if len(p.items):
                    result = concat(
                        [p.iloc[i].T for i in range(len(p.items))],
                        keys=p.items)
                else:
                    # empty result: preserve the expected MultiIndex shape
                    result = DataFrame(
                        index=MultiIndex(levels=[arg1.index, arg1.columns],
                                         labels=[[], []]),
                        columns=arg2.columns,
                        dtype='float64')
                # reset our index names to arg1 names
                # reset our column names to arg2 names
                # careful not to mutate the original names
                result.columns = result.columns.set_names(
                    arg2.columns.names)
                result.index = result.index.set_names(
                    arg1.index.names + arg1.columns.names)
                return result
            else:
                raise ValueError("'pairwise' is not True/False")
        else:
            results = {}
            for i, col in enumerate(arg1.columns):
                results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
            return dataframe_from_int_dict(results, arg1)
    else:
        # DataFrame on the right: swap the arguments and recurse
        return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
    """Translate exactly one of com/span/halflife/alpha to a center of mass.

    Raises ValueError if more than one (or none) of the decay
    specifications is given, or if the given one is out of domain.
    """
    n_specified = _count_not_none(com, span, halflife, alpha)
    if n_specified > 1:
        raise ValueError("com, span, halflife, and alpha "
                         "are mutually exclusive")
    # Convert to center of mass; domain checks ensure 0 < alpha <= 1
    if com is not None:
        if com < 0:
            raise ValueError("com must satisfy: com >= 0")
    elif span is not None:
        if span < 1:
            raise ValueError("span must satisfy: span >= 1")
        com = (span - 1) / 2.
    elif halflife is not None:
        if halflife <= 0:
            raise ValueError("halflife must satisfy: halflife > 0")
        decay = 1 - np.exp(np.log(0.5) / halflife)
        com = 1 / decay - 1
    elif alpha is not None:
        if alpha <= 0 or alpha > 1:
            raise ValueError("alpha must satisfy: 0 < alpha <= 1")
        com = (1.0 - alpha) / alpha
    else:
        raise ValueError("Must pass one of com, span, halflife, or alpha")
    return float(com)
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
# Top-level exports
def rolling(obj, win_type=None, **kwds):
    """Construct a Rolling (or weighted Window) object over ``obj``."""
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))
    if win_type is None:
        return Rolling(obj, **kwds)
    return Window(obj, win_type=win_type, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
    """Construct an Expanding object over a Series or DataFrame."""
    if isinstance(obj, (ABCSeries, ABCDataFrame)):
        return Expanding(obj, **kwds)
    raise TypeError('invalid type: %s' % type(obj))
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
    """Construct an EWM (exponentially weighted) object over ``obj``."""
    if isinstance(obj, (ABCSeries, ABCDataFrame)):
        return EWM(obj, **kwds)
    raise TypeError('invalid type: %s' % type(obj))
ewm.__doc__ = EWM.__doc__
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/core/window.py
|
Python
|
apache-2.0
| 68,731
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
from nose.tools import eq_, raises
import mapnik
from .utilities import execution_path, run_all
def setup():
    """nose setup hook: make test-relative paths resolve correctly."""
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
# The whole suite is only defined when mapnik was built with webp support.
if mapnik.has_webp():
    # scratch directory for the 'actual' encodings produced during the run
    tmp_dir = '/tmp/mapnik-webp/'
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    # encoder option strings to round-trip through mapnik's webp writer
    opts = [
        'webp',
        'webp:method=0',
        'webp:method=6',
        'webp:quality=64',
        'webp:alpha=false',
        'webp:partitions=3',
        'webp:preprocessing=1',
        'webp:partition_limit=50',
        'webp:pass=10',
        'webp:alpha_quality=50',
        'webp:alpha_filtering=2',
        'webp:alpha_compression=0',
        'webp:autofilter=0',
        'webp:filter_type=1:autofilter=1',
        'webp:filter_sharpness=4',
        'webp:filter_strength=50',
        'webp:sns_strength=50',
        'webp:segments=3',
        'webp:target_PSNR=.5',
        'webp:target_size=100'
    ]
def gen_filepath(name, format):
return os.path.join('images/support/encoding-opts',
name + '-' + format.replace(":", "+") + '.webp')
    def test_quality_threshold():
        """In-range quality values (0..100, float) must encode without error."""
        im = mapnik.Image(256, 256)
        im.tostring('webp:quality=99.99000')
        im.tostring('webp:quality=0')
        im.tostring('webp:quality=0.001')
    @raises(RuntimeError)
    def test_quality_threshold_invalid():
        """quality above 100 must be rejected."""
        im = mapnik.Image(256, 256)
        im.tostring('webp:quality=101')
    @raises(RuntimeError)
    def test_quality_threshold_invalid2():
        """negative quality must be rejected."""
        im = mapnik.Image(256, 256)
        im.tostring('webp:quality=-1')
    @raises(RuntimeError)
    def test_quality_threshold_invalid3():
        """fractional quality just above 100 must also be rejected."""
        im = mapnik.Image(256, 256)
        im.tostring('webp:quality=101.1')
generate = os.environ.get('UPDATE')
def _roundtrip_check(im, name, opt, fails):
    """Encode *im* with encoder options *opt* and diff against the stored image.

    The expected file lives under images/support/encoding-opts (see
    gen_filepath); the freshly-encoded copy is written to tmp_dir.  When
    ``generate`` is set or the expected file is missing it is (re)created
    first.  On a byte mismatch a message is appended to *fails*; if the
    installed libwebp is too old to read the expected image the check is
    skipped with a warning.
    """
    expected = gen_filepath(name, opt)
    actual = os.path.join(tmp_dir, os.path.basename(expected))
    if generate or not os.path.exists(expected):
        print('generating expected image', expected)
        im.save(expected, opt)
    im.save(actual, opt)
    try:
        expected_bytes = mapnik.Image.open(expected).tostring()
    except RuntimeError:
        # this will happen if libweb is old, since it cannot open
        # images created by more recent webp
        print(
            'warning, cannot open webp expected image (your libwebp is likely too old)')
        return
    if mapnik.Image.open(actual).tostring() != expected_bytes:
        fails.append(
            '%s (actual) not == to %s (expected)' %
            (actual, expected))


def test_expected_encodings():
    """Round-trip three reference images through every entry of ``opts``.

    The same encode/compare logic used to be copy-pasted three times; it
    now lives in _roundtrip_check.  Results are collected in ``fails`` but
    the final assertion stays disabled (see comment below).
    """
    fails = []
    try:
        # 1) an untouched (blank) image
        for opt in opts:
            _roundtrip_check(mapnik.Image(256, 256), 'blank', opt, fails)
        # 2) a solid green fill
        for opt in opts:
            im = mapnik.Image(256, 256)
            im.fill(mapnik.Color('green'))
            _roundtrip_check(im, 'solid', opt, fails)
        # 3) a real RGBA photo with transparency
        for opt in opts:
            im = mapnik.Image.open(
                'images/support/transparency/aerial_rgba.png')
            _roundtrip_check(im, 'aerial_rgba', opt, fails)
        # disabled to avoid failures on ubuntu when using old webp packages
        # eq_(fails,[],'\n'+'\n'.join(fails))
    except RuntimeError as e:
        print(e)
def test_transparency_levels():
    """Round-trip an image with three alpha levels through webp encoding.

    Builds a 256x256 image that is half-transparent white overall, with a
    mostly-transparent yellow top-left quadrant and an opaque cyan
    bottom-right quadrant, then checks the encoded size matches the stored
    expected file.
    """
    try:
        # create partial transparency image
        im = mapnik.Image(256, 256)
        im.fill(mapnik.Color('rgba(255,255,255,.5)'))
        c2 = mapnik.Color('rgba(255,255,0,.2)')
        c3 = mapnik.Color('rgb(0,255,255)')
        # top-left quadrant: mostly transparent yellow
        for y in range(0, int(im.height() / 2)):
            for x in range(0, int(im.width() / 2)):
                im.set_pixel(x, y, c2)
        # bottom-right quadrant: fully opaque cyan
        for y in range(int(im.height() / 2), im.height()):
            for x in range(int(im.width() / 2), im.width()):
                im.set_pixel(x, y, c3)
        t0 = tmp_dir + 'white0-actual.webp'
        # named 'fmt' rather than 'format' to avoid shadowing the builtin
        fmt = 'webp'
        expected = 'images/support/transparency/white0.webp'
        if generate or not os.path.exists(expected):
            im.save('images/support/transparency/white0.webp')
        im.save(t0, fmt)
        im_in = mapnik.Image.open(t0)
        t0_len = len(im_in.tostring(fmt))
        try:
            expected_bytes = mapnik.Image.open(expected).tostring(fmt)
        except RuntimeError:
            # this will happen if libweb is old, since it cannot open
            # images created by more recent webp
            print(
                'warning, cannot open webp expected image (your libwebp is likely too old)')
            return
        eq_(t0_len, len(expected_bytes))
    except RuntimeError as e:
        print(e)
# Discover and run every module-level function whose name starts with
# "test_" when this file is executed directly (instead of via nose).
if __name__ == "__main__":
    setup()
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
tomhughes/python-mapnik
|
test/python_tests/webp_encoding_test.py
|
Python
|
lgpl-2.1
| 7,128
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.